diff -Nru alglib-3.10.0/debian/changelog alglib-3.16.0/debian/changelog --- alglib-3.10.0/debian/changelog 2015-08-26 20:21:05.000000000 +0000 +++ alglib-3.16.0/debian/changelog 2020-08-31 21:04:58.000000000 +0000 @@ -1,3 +1,125 @@ +alglib (3.16.0-1~16.04.sav0) xenial; urgency=medium + + * Backport to Xenial + * debian/control: Set debhelper-compat (= 10) BD (LP highest for Xenial) + + -- Rob Savoury Mon, 31 Aug 2020 14:04:58 -0700 + +alglib (3.16.0-1) unstable; urgency=medium + + * [d432303] New upstream version 3.16.0 + * [6fd12f6] Set compat-level 12 + * [c5bc566] Set Standards-Version: 4.4.1 + * [e7911f7] Trim trailing whitespace. + * [139e7f8] Use secure URI in debian/watch. + * [96bf324] Use secure URI in Homepage field. + + -- Anton Gladky Thu, 02 Jan 2020 21:18:29 +0100 + +alglib (3.14.0-3) unstable; urgency=medium + + * [4426d48] Fix autopkgtest + + -- Anton Gladky Mon, 03 Dec 2018 23:38:48 +0100 + +alglib (3.14.0-2) unstable; urgency=medium + + * [629add4] Disable lsfit test, failing sometimes on mipsel + + -- Anton Gladky Sun, 02 Dec 2018 20:07:43 +0100 + +alglib (3.14.0-1) unstable; urgency=medium + + * Upload into unstable. + + -- Anton Gladky Fri, 30 Nov 2018 22:02:13 +0100 + +alglib (3.14.0-1~exp3) experimental; urgency=medium + + * [9dc85d6] One more try to disable minlm test + + -- Anton Gladky Sun, 18 Nov 2018 00:20:25 +0100 + +alglib (3.14.0-1~exp2) experimental; urgency=medium + + [ Jelmer Vernooij ] + * Trim trailing whitespace. + + [ Anton Gladky ] + * [1b70352] Disable unreliable minlm test + + -- Anton Gladky Sat, 17 Nov 2018 21:35:16 +0100 + +alglib (3.14.0-1~exp1) experimental; urgency=medium + + * [cb4e129] Fix line endings + * [e4df8e1] New upstream version 3.14.0 + * [7d9ae4e] Refresh patch, remove old one + * [1c64cc9] Update VCS-fields + * [3bee2b6] Update Standards-Version to 4.2.1 + * [8131f1a] Set compat level to 11 + * [a01bdc9] Remove Testsuite-field from d/control + * [2626a68] Rename the package due to so-version increase + * [bd75b15] Replace priority extra by optional + + -- Anton Gladky Sun, 30 Sep 2018 22:44:11 +0200 + +alglib (3.11.0-3) unstable; urgency=medium + + * Team upload. + * [a508c14] d/rules: Add -ffloat-store flag to more 32 bit archs + + -- Gert Wollny Tue, 10 Oct 2017 12:24:58 +0000 + +alglib (3.11.0-2) unstable; urgency=medium + + * Team upload. + * [6148f06] d/rules: add -ffloat-store for i386, Closes: #877065 + * [ea994a2] d/control: standards 4.1.1, no changes needed + + -- Gert Wollny Tue, 10 Oct 2017 08:58:45 +0000 + +alglib (3.11.0-1) unstable; urgency=medium + + * Upload into unstable. + + -- Anton Gladky Fri, 14 Jul 2017 22:26:42 +0200 + +alglib (3.11.0-1~exp4) experimental; urgency=medium + + * [ef71752] Disable auto_tests on mips (sometimes fails du to timeouts) + + -- Anton Gladky Wed, 12 Jul 2017 08:31:18 +0200 + +alglib (3.11.0-1~exp3) experimental; urgency=medium + + * [cf5caa6] One more try to disable minlm test. + + -- Anton Gladky Tue, 11 Jul 2017 20:41:08 +0200 + +alglib (3.11.0-1~exp2) experimental; urgency=medium + + * [b981969] Disable minlm test. + + -- Anton Gladky Fri, 07 Jul 2017 23:00:44 +0200 + +alglib (3.11.0-1~exp1) experimental; urgency=medium + + * [6b25eb3] Remove disable-test patch. + * [08fd12a] Switch to the next upstream version 3.11 + * [4f42229] Switch to the compat level 10. + * [2bde9bd] Enable tests on all platforms. + * [1ba52a0] New upstream version 3.11.0 + + -- Anton Gladky Fri, 19 May 2017 15:20:28 +0200 + +alglib (3.10.0-2) unstable; urgency=medium + + * [ef11ade] Apply cme fix dpkg. 
+ * [ead797e] Drop -dbg-package. + + -- Anton Gladky Fri, 26 Aug 2016 22:56:37 +0200 + alglib (3.10.0-1) unstable; urgency=medium * [f1b452c] Imported Upstream version 3.10.0 @@ -90,7 +212,7 @@ alglib (3.8.0-2) unstable; urgency=low - * [88839b9] Disable minlm and pspline tests, because they are + * [88839b9] Disable minlm and pspline tests, because they are failing on some platforms. -- Anton Gladky Wed, 04 Sep 2013 21:23:05 +0200 @@ -126,7 +248,7 @@ alglib (3.7.0-4) unstable; urgency=low - * [ed6f21f] Disable some (they were not properly disabled during the + * [ed6f21f] Disable some (they were not properly disabled during the previous uploads). -- Anton Gladky Thu, 16 May 2013 20:52:25 +0200 @@ -143,7 +265,7 @@ alglib (3.7.0-2) unstable; urgency=low * Team upload. - * Disable minlm test, as it fails on many archs. + * Disable minlm test, as it fails on many archs. -- Anton Gladky Thu, 16 May 2013 00:06:54 +0200 @@ -159,7 +281,7 @@ * Team upload * [24826b4] Imported Upstream version 3.7.0 * [0d2f7bc] Replace autotools build by cmake. - * [679557b] Replace 2.6.0 version by 3. + * [679557b] Replace 2.6.0 version by 3. Implement multiarch. * [2b2d7d0] Use compat-level 9. * [9ff7f12] Update copyright-file. diff -Nru alglib-3.10.0/debian/compat alglib-3.16.0/debian/compat --- alglib-3.10.0/debian/compat 2015-08-26 05:40:58.000000000 +0000 +++ alglib-3.16.0/debian/compat 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -9 diff -Nru alglib-3.10.0/debian/control alglib-3.16.0/debian/control --- alglib-3.10.0/debian/control 2015-08-26 05:47:06.000000000 +0000 +++ alglib-3.16.0/debian/control 2020-08-31 21:04:58.000000000 +0000 @@ -4,20 +4,19 @@ Gudjon I. Gudjonsson , Anton Gladky Section: libs -Testsuite: autopkgtest Priority: optional Build-Depends: cmake, - debhelper (>= 9) -Standards-Version: 3.9.6 -Vcs-Browser: https://anonscm.debian.org/cgit/debian-science/packages/alglib.git -Vcs-Git: git://anonscm.debian.org/debian-science/packages/alglib.git -Homepage: http://www.alglib.net/ + debhelper-compat (= 10) +Standards-Version: 4.4.1 +Vcs-Browser: https://salsa.debian.org/science-team/alglib +Vcs-Git: https://salsa.debian.org/science-team/alglib.git +Homepage: https://www.alglib.net/ Package: libalglib-dev Architecture: any Section: libdevel -Priority: extra -Depends: libalglib3.10 (= ${binary:Version}), +Priority: optional +Depends: libalglib3.14 (= ${binary:Version}), ${misc:Depends} Description: Development files for the alglib library ALGLIB is a cross-platform numerical analysis and data processing library. @@ -38,7 +37,7 @@ This package contains the development files (headers and documentation) for ALGLIB. -Package: libalglib3.10 +Package: libalglib3.14 Architecture: any Multi-Arch: same Depends: ${misc:Depends}, @@ -59,29 +58,3 @@ * Special functions * Statistics (descriptive statistics, hypothesis testing) * Data analysis (classification/regression, including neural networks) - -Package: libalglib3.10-dbg -Architecture: any -Multi-Arch: same -Section: debug -Priority: extra -Depends: libalglib3.10 (= ${binary:Version}), - ${misc:Depends} -Pre-Depends: ${misc:Pre-Depends} -Description: Debugging symbols for the alglib library - ALGLIB is a cross-platform numerical analysis and data processing library. - This package support C++. ALGLIB features include: - . 
- * Linear algebra (direct algorithms, EVD/SVD) - * Solvers (linear and nonlinear) - * Interpolation - * Optimization - * Fast Fourier transforms - * Numerical integration - * Linear and nonlinear least-squares fitting - * Ordinary differential equations - * Special functions - * Statistics (descriptive statistics, hypothesis testing) - * Data analysis (classification/regression, including neural networks) - . - This package contains the debugging symbols for ALGLIB. diff -Nru alglib-3.10.0/debian/copyright alglib-3.16.0/debian/copyright --- alglib-3.10.0/debian/copyright 2015-08-26 05:40:58.000000000 +0000 +++ alglib-3.16.0/debian/copyright 2020-01-02 20:18:12.000000000 +0000 @@ -1,4 +1,4 @@ -Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: alglib Source: www.alglib.net License: GPL-2.0+ diff -Nru alglib-3.10.0/debian/libalglib3.10.install alglib-3.16.0/debian/libalglib3.10.install --- alglib-3.10.0/debian/libalglib3.10.install 2015-08-26 05:40:58.000000000 +0000 +++ alglib-3.16.0/debian/libalglib3.10.install 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -usr/lib/*/libalglib*.so.* diff -Nru alglib-3.10.0/debian/libalglib3.14.install alglib-3.16.0/debian/libalglib3.14.install --- alglib-3.10.0/debian/libalglib3.14.install 1970-01-01 00:00:00.000000000 +0000 +++ alglib-3.16.0/debian/libalglib3.14.install 2020-01-02 20:16:34.000000000 +0000 @@ -0,0 +1 @@ +usr/lib/*/libalglib*.so.* diff -Nru alglib-3.10.0/debian/patches/01_add_cmake.patch alglib-3.16.0/debian/patches/01_add_cmake.patch --- alglib-3.10.0/debian/patches/01_add_cmake.patch 2015-08-26 05:44:26.000000000 +0000 +++ alglib-3.16.0/debian/patches/01_add_cmake.patch 2020-01-02 20:16:34.000000000 +0000 @@ -2,10 +2,10 @@ Author: Anton Gladky Last-Update: 2015-04-16 -Index: cpp/CMakeLists.txt +Index: alglib/CMakeLists.txt =================================================================== --- /dev/null -+++ cpp/CMakeLists.txt ++++ alglib/CMakeLists.txt @@ -0,0 +1,31 @@ +project(alglib CXX) +cmake_minimum_required(VERSION 2.8) diff -Nru alglib-3.10.0/debian/patches/10_disable_minlm_test.patch alglib-3.16.0/debian/patches/10_disable_minlm_test.patch --- alglib-3.10.0/debian/patches/10_disable_minlm_test.patch 2015-08-26 05:44:10.000000000 +0000 +++ alglib-3.16.0/debian/patches/10_disable_minlm_test.patch 2020-01-02 20:18:29.000000000 +0000 @@ -1,16 +1,36 @@ -Description: disable minlm test, because it fails sometimes on some platforms. 
+Description: Disable minlm test + The test is unreliable on some platforms Author: Anton Gladky -Last-Update: 2015-05-29 +Last-Update: 2018-11-17 -Index: cpp/tests/test_c.cpp +Index: alglib/tests/test_c.cpp =================================================================== ---- cpp.orig/tests/test_c.cpp -+++ cpp/tests/test_c.cpp -@@ -93354,7 +93354,6 @@ _s_testrecord unittests[] = - {"spline1d",testspline1d,_pexec_testspline1d}, - {"normestimator",testnormestimator,_pexec_testnormestimator}, - {"minqp",testminqp,_pexec_testminqp}, -- {"minlm",testminlm,_pexec_testminlm}, - {"lsfit",testlsfit,_pexec_testlsfit}, - {"parametric",testparametric,_pexec_testparametric}, - {"linlsqr",testlinlsqr,_pexec_testlinlsqr}, +--- alglib.orig/tests/test_c.cpp ++++ alglib/tests/test_c.cpp +@@ -700,8 +700,8 @@ void testother(ae_bool* err, ae_state *_ + + + +-ae_bool testminlm(ae_bool silent, ae_state *_state); +-ae_bool _pexec_testminlm(ae_bool silent, ae_state *_state); ++// ae_bool testminlm(ae_bool silent, ae_state *_state); ++// ae_bool _pexec_testminlm(ae_bool silent, ae_state *_state); + + + +@@ -128934,7 +128934,6 @@ _s_testrecord unittests[] = + {"minbc",testminbc}, + {"minns",testminns}, + {"mincg",testmincg}, +- {"minlm",testminlm}, + {"evd",testevd}, + {"basestat",testbasestat}, + {"pca",testpca}, +@@ -128966,7 +128965,6 @@ _s_testrecord unittests[] = + {"parametric",testparametric}, + {"spline3d",testspline3d}, + {"polint",testpolint}, +- {"lsfit",testlsfit}, + {"spline2d",testspline2d}, + {"rbf",testrbf}, + {"hermite",testhermite}, diff -Nru alglib-3.10.0/debian/rules alglib-3.16.0/debian/rules --- alglib-3.10.0/debian/rules 2015-08-26 05:47:27.000000000 +0000 +++ alglib-3.16.0/debian/rules 2020-01-02 20:18:02.000000000 +0000 @@ -1,15 +1,25 @@ #!/usr/bin/make -f -%: - dh $@ --parallel -UPSTREAM_VERSION=3.10 +export DEB_BUILD_MAINT_OPTIONS = hardening=+all -ifneq (,$(filter $(DEB_HOST_ARCH),mips mipsel)) -override_dh_auto_test: +DEB_HOST_ARCH ?= $(shell dpkg-architecture -qDEB_HOST_ARCH) + +ifneq (,$(findstring $(DEB_HOST_ARCH), i386 mipsel hurd-i386 kfreebsd-i386 )) + export DEB_CFLAGS_MAINT_APPEND = -ffloat-store + export DEB_CXXFLAGS_MAINT_APPEND = -ffloat-store endif -override_dh_strip: - dh_strip --dbg-package=libalglib$(UPSTREAM_VERSION)-dbg +%: + dh $@ + +UPSTREAM_VERSION=3.14 override_dh_auto_configure: dh_auto_configure -- -DVERSION="$(UPSTREAM_VERSION).0" -DSOVERSION="$(UPSTREAM_VERSION)" + +# Disable auto test for the mips platform, because it fails sometimes +# on weak machiens with timouts +disable_auto_test_archs = mips +ifneq (,$(filter $(DEB_HOST_ARCH),$(disable_auto_test_archs))) +override_dh_auto_test: +endif diff -Nru alglib-3.10.0/debian/tests/build1 alglib-3.16.0/debian/tests/build1 --- alglib-3.10.0/debian/tests/build1 2015-08-26 05:40:58.000000000 +0000 +++ alglib-3.16.0/debian/tests/build1 2020-01-02 20:16:34.000000000 +0000 @@ -57,7 +57,7 @@ // We call function with "smp_" prefix, which means that ALGLIB // will try to execute it in parallel manner whenever it is possible. 
flops = 2*pow((double)n, (double)3); - alglib::smp_rmatrixgemm( + alglib::rmatrixgemm( n, n, n, 1.0, a, 0, 0, 0, diff -Nru alglib-3.10.0/debian/tests/build4 alglib-3.16.0/debian/tests/build4 --- alglib-3.10.0/debian/tests/build4 2015-08-26 05:40:58.000000000 +0000 +++ alglib-3.16.0/debian/tests/build4 2020-01-02 20:16:34.000000000 +0000 @@ -121,15 +121,10 @@ c = "[[0,0],[0,0]]"; rmatrixgemm(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); - // SMP code with default number of worker threads - c = "[[0,0],[0,0]]"; - smp_rmatrixgemm(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); - printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [[4,3],[2,4]] - // override number of worker threads - use two cores alglib::setnworkers(+2); c = "[[0,0],[0,0]]"; - smp_rmatrixgemm(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); + rmatrixgemm(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [[4,3],[2,4]] return 0; } diff -Nru alglib-3.10.0/debian/tests/build5 alglib-3.16.0/debian/tests/build5 --- alglib-3.10.0/debian/tests/build5 2015-08-26 05:40:58.000000000 +0000 +++ alglib-3.16.0/debian/tests/build5 2020-01-02 20:16:34.000000000 +0000 @@ -121,7 +121,7 @@ // example we choose to update upper triangle. // c = "[[0,0],[0,0]]"; - smp_rmatrixsyrk(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); + rmatrixsyrk(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [[1,2],[0,4]] // @@ -133,7 +133,7 @@ // alglib::setnworkers(+2); c = "[[0,0],[0,0]]"; - smp_rmatrixsyrk(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); + rmatrixsyrk(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [[1,2],[0,4]] return 0; } diff -Nru alglib-3.10.0/debian/watch alglib-3.16.0/debian/watch --- alglib-3.10.0/debian/watch 2015-08-26 05:40:58.000000000 +0000 +++ alglib-3.16.0/debian/watch 2020-01-02 20:18:04.000000000 +0000 @@ -1,2 +1,2 @@ version=3 -http://www.alglib.net/translator/re/alglib-(\d.*)\.cpp\.gpl\.(?:tgz|tbz2|txz|tar\.(?:gz|bz2|xz)) +https://www.alglib.net/translator/re/alglib-(\d.*)\.cpp\.gpl\.(?:tgz|tbz2|txz|tar\.(?:gz|bz2|xz)) diff -Nru alglib-3.10.0/manual.cpp.html alglib-3.16.0/manual.cpp.html --- alglib-3.10.0/manual.cpp.html 2015-08-19 12:24:24.000000000 +0000 +++ alglib-3.16.0/manual.cpp.html 2019-12-19 10:28:28.000000000 +0000 @@ -6,9 +6,12 @@ h3 { background-color: #E0E0E0; padding: 0.2em; } sheader { } .inlineheader { background-color: #E8E8E8; padding: 0.1em; font-weight:bold; } -.pagecontent { font-family: Arial; font-size: 10pt; width: 770px; text-align: justify; } -.pageheader { width: 770px; } -.source { font-family: "Courier New"; font-size: 10pt; } +.pagecontent { font-family: Verdana, Arial, sans-serif; font-size: 1.0em; width: 50em; text-align: justify; } +.pageheader { width: 50em; } +.source { font-family: "Courier New"; font-size: 1.0em; } + +code { font-family: monospace; font-size: 1.0em; } + .p_example { margin-left: 4em; } .p_note { margin-left: 50px; margin-right: 50px; font-size: 80%; } @@ -41,7 +44,7 @@
 1 Introduction
-    1.1 What is ALGLIB
+    1.1 What is ALGLIB
     1.2 ALGLIB license
     1.3 Documentation license
     1.4 Reference Manual and User Guide
@@ -59,30 +62,34 @@
     4.1 Adding to your project
     4.2 Configuring for your compiler
     4.3 Improving performance (CPU-specific and OS-specific optimizations)
-5 Working with commercial version
-    5.1 Benefits of commercial version
-    5.2 Working with SSE support (Intel/AMD users)
-    5.3 Using multithreading
-        5.3.1 SMT (CMT/hyper-threading) issues
-    5.4 Linking with Intel MKL
-        5.4.1 Using lightweight Intel MKL supplied by ALGLIB Project
-        5.4.2 Using your own installation of Intel MKL
-    5.5 Examples - compiling commercial edition of ALGLIB
-        5.5.1 Introduction
-        5.5.2 Compiling under Windows
-6 Using ALGLIB
-    6.1 Thread-safety
-    6.2 Global definitions
-    6.3 Datatypes
-    6.4 Constants
-    6.5 Functions
-    6.6 Working with vectors and matrices
-    6.7 Using functions: 'expert' and 'friendly' interfaces
-    6.8 Handling errors
-    6.9 Working with Level 1 BLAS functions
-    6.10 Reading data from CSV files
+    4.4 Examples (free and commercial editions)
+        4.4.1 Introduction
+        4.4.2 Compiling under Windows
+        4.4.3 Compiling under Linux
+5 Using ALGLIB
+    5.1 Thread-safety
+    5.2 Global definitions
+    5.3 Datatypes
+    5.4 Constants
+    5.5 Functions
+    5.6 Working with vectors and matrices
+    5.7 Using functions: 'expert' and 'friendly' interfaces
+    5.8 Handling errors
+    5.9 Working with Level 1 BLAS functions
+    5.10 Reading data from CSV files
+6 Working with commercial version
+    6.1 Benefits of commercial version
+    6.2 Working with SIMD support (Intel/AMD users)
+    6.3 Using multithreading
+        6.3.1 General information
+        6.3.2 SMT (CMT/hyper-threading) issues
+    6.4 Linking with Intel MKL
+        6.4.1 Using lightweight Intel MKL supplied by ALGLIB Project
+        6.4.2 Using your own installation of Intel MKL
 7 Advanced topics
-    7.1 Testing ALGLIB
+    7.1 Exception-free mode
+    7.2 Partial compilation
+    7.3 Testing ALGLIB
 8 ALGLIB packages and subpackages
     8.1 AlglibMisc package
     8.2 DataAnalysis package
@@ -100,11 +107,11 @@
 

1 Introduction

-

1.2 1.1 What is ALGLIB

+

1.1 What is ALGLIB

ALGLIB is a cross-platform numerical analysis and data mining library. -It supports several programming languages (C++, C#, Pascal, VBA) and several operating systems (Windows, *nix family). +It supports several programming languages (C++, C#, Delphi, VB.NET, Python) and several operating systems (Windows, *nix family).

@@ -123,8 +130,8 @@

    -
  • ALGLIB Free Edition - full functionality but limited performance
  • -
  • ALGLIB Commercial Edition - high-performance version of ALGLIB
  • +
  • ALGLIB Free Edition - full functionality but limited performance and license
  • +
  • ALGLIB Commercial Edition - high-performance version of ALGLIB with business-friendly license

@@ -135,13 +142,23 @@ We obtained license from Intel corp., which allows us to integrate Intel MKL into ALGLIB, so you don't have to buy separate license from Intel.

-

1.2 1.1 ALGLIB license

+

1.2 ALGLIB license

-ALGLIB Free Edition is distributed under GPL 2+, GPL license version 2 or at your option any later version. -A copy of the GNU General Public License is available at http://www.fsf.org/licensing/licenses +ALGLIB Free Edition is distributed under a license which favors non-commercial usage, +but is not well suited for commercial applications:

+ +

ALGLIB Commercial Edition is distributed under a license which is friendly to commercial users. A copy of the commercial license can be found at http://www.alglib.net/commercial.php. @@ -155,7 +172,7 @@

-Copyright 1994-2009 Sergey Bochkanov, ALGLIB Project. All rights reserved. +Copyright 1994-2017 Sergey Bochkanov, ALGLIB Project. All rights reserved.

@@ -291,7 +308,7 @@

  • Performance. -Many algorithms in commercial ALGLIB are multi-threaded and SSE-optimized (when used on Intel systems). +Many algorithms in commercial ALGLIB are multi-threaded and SIMD-optimized (when used on Intel systems). Open source ALGLIB is single-threaded and can not efficiently use modern multicore CPU's.
    You have to study comments on specific functions if you want to know whether they have multithreaded versions or not.
  • @@ -322,7 +339,7 @@

    -As for Intel architecture, ALGLIB works with both FPU-based and SSE-based implementations of floating point math. +As for Intel architecture, ALGLIB works with both FPU-based and SIMD-based implementations of floating point math. 80-bit internal representation used by Intel FPU is not a problem for ALGLIB.

    @@ -358,7 +375,7 @@

    -All modern compilers (in particular, reasonably new versions of MSVC, GCC and Sun Studio) satisfy these requirements. +All modern compilers satisfy these requirements.

    @@ -401,7 +418,7 @@

    Adding ALGLIB to your project is easy - just pick packages you need and... add them to your project! -Under most used compilers (GCC, MSVC, Sun Studio) it will work without any additional settings. +Under most used compilers (GCC, MSVC) it will work without any additional settings. In other cases you will need to define several preprocessor definitions (this topic will be detailed below), but everything will still be simple.

    @@ -432,13 +449,14 @@

    4.2 Configuring for your compiler

    -If you use modern versions of MSVC, GCC or Sun Studio, you don't need to configure ALGLIB at all. +If you use modern versions of MSVC or GCC, you don't need to configure ALGLIB at all. But if you use outdated versions of these compilers (or something else), then you may need to tune definitions of several data types:

    • alglib_impl::ae_int32_t - signed integer which is 32 bits wide
    • alglib_impl::ae_int64_t - signed integer which is 64 bits wide
    • +
    • alglib_impl::ae_uint64_t - unsigned integer which is 64 bits wide
    • alglib_impl::ae_int_t - signed integer which has same width as pointer
    @@ -449,6 +467,7 @@
    • ae_int32_t is defined as int, because this type is 32 bits wide in all modern compilers.
    • ae_int64_t is defined as _int64 (MSVC) or as signed long long (GCC, Sun Studio).
    • +
    • ae_uint64_t is defined as unsigned _int64 (MSVC) or as unsigned long long (GCC, Sun Studio).
    • ae_int_t is defined as ptrdiff_t.
    @@ -458,8 +477,9 @@
    • if your compiler provides stdint.h, you can define AE_HAVE_STDINT conditional symbol
    • -
    • alternatively, you can manually define AE_INT32_T and/or AE_INT64_T and/or AE_INT_T symbols. -Just assign datatype name to them, and ALGLIB will automatically use your definition. You can define only one or two types (those which are not defined automatically).
    • +
    • alternatively, you can manually define AE_INT32_T and/or AE_INT64_T and/or AE_UINT64_T and/or AE_INT_T symbols. +Just assign datatype name to them, and ALGLIB will automatically use your definition. +You may define all or just one/two types (those which are not detected automatically).
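For illustration only (not part of the upstream manual), a minimal sketch of the manual-definition fallback described in the list above. The concrete right-hand-side types are assumptions for a typical LP64 toolchain and must match what your compiler actually provides; the symbols can equally be passed globally on the compiler command line (e.g. -DAE_INT32_T=int).

ae_types_config.h (ILLUSTRATIVE SKETCH)

#include <cstddef>                        /* ptrdiff_t */
/* If your compiler ships stdint.h, defining AE_HAVE_STDINT instead is enough. */
#define AE_INT32_T   int                  /* 32-bit signed integer (assumed)   */
#define AE_INT64_T   long long            /* 64-bit signed integer (assumed)   */
#define AE_UINT64_T  unsigned long long   /* 64-bit unsigned integer (assumed) */
#define AE_INT_T     ptrdiff_t            /* signed integer as wide as a pointer */
#include "ap.h"                           /* ALGLIB picks up the overrides above */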
    @@ -487,324 +507,206 @@

    -When AE_CPU macro is defined and equals to the AE_INTEL, it enables SSE2 support. -ALGLIB will use cpuid instruction to determine SSE2 presence at run-time and - in case we have SSE2 - to use SSE2-capable code. -ALGLIB uses SSE2 intrinsics which are portable across different compilers and efficient enough for most practical purposes. -As of ALGLIB 3.4, SSE2 support is available for MSVC, GCC and Sun Studio users only. -

    - -

    5 Working with commercial version

    - -

    5.1 Benefits of commercial version

    - -

    -Commercial version of ALGLIB for C++ features four important improvements over open source one: -

    - -
      -
    • -License. -Commercial license used by ALGLIB is friendly to closed source applications. -Unlike GPL, it does not require you to open source your application. -Thus, almost any commercial software developer is interested in obtaining commercial license. -
    • -
    • -Low-level optimizations. -Commercial version of ALGLIB includes SSE-optimized versions of many computationally intensive functions. -In particular, commercial version of neural networks outperforms open source one with 2-3x increase in speed - -even without multithreading! -It allows to increase performance on Intel/AMD platforms while still being able to use software under non-x86 CPU's. -
    • -
    • -Multithreading. -Commercial version of ALGLIB can utilize multicore capabilities of modern CPU's. -Large computational problems can be automatically split between different cores. -ALGLIB uses its own multithreading framework which does not depend on vendor/compiler support for technologies like OpenMP/MPI/... -It gives ALGLIB unprecedented portability across operating systems and compilers. -
    • -
    • -Integrated Intel MKL. -Commercial version of ALGLIB includes special MKL extensions - -special lightweight distribution of Intel MKL, high-performance numerical analysis library, accompanied by ALGLIB-MKL interface. -We obtained license from Intel which allows us to integrate MKL into ALGLIB distribution. -Linking with MKL accelerates many ALGLIB functions, -however due to license restrictions you can not use MKL directly (i.e. bypass ALGLIB interface between your program and MKL). -
    • -
    - -

    5.2 Working with SSE support (Intel/AMD users)

    - -

    -ALGLIB for C++ can utilize SSE2 set of instructions supported by all modern Intel and AMD x86 processors. -This feature is optional and must be explicitly turned on during compile-time. -If you do not activate it, ALGLIB will use generic C code, without any processor-specific assembly/intrinsics. -

    - -

    -Thus, if you turn on this feature, your code will run faster on x86_32 and x86_64 processors, -but will be unportable to non-x86 platforms (and Intel MIC platform, which is not exactly x86 and does not support SSE!). -From the other side, if you do not activate this feature, your code will be portable to almost any modern CPU (SPARC, ARM, ...). -

    - -

    -In order to turn on x86-specific optimizations, -you should define AE_CPU=AE_INTEL preprocessor definition at global level. -It will tell ALGLIB to use SSE intrinsics supported by GCC, MSVC and Intel compilers. -Additionally you should tell compiler to generate SSE-capable code. -It can be done in the project settings of your IDE or in the command line: -

    - -
    -
    -GCC example:
    -> g++ -msse2 -I. -DAE_CPU=AE_INTEL *.cpp -lm
    -
    -MSVC example:
    -> cl /I. /EHsc /DAE_CPU=AE_INTEL *.cpp
    -
    -
    - -

    5.3 Using multithreading

    - -

    -Commercial version of ALGLIB includes out-of-the-box support for multithreading. -Many (not all) computationally intensive problems can be solved in multithreaded mode. -You should read comments on specific ALGLIB functions to determine what can be multithreaded and what can not. -

    - -

    -ALGLIB does not depend on vendor/compiler support for technologies like OpenMP/MPI/... -Under Windows ALGLIB uses OS threads and custom synchronization framework. -Under POSIX-compatible OS (Solaris, Linux, FreeBSD, NetBSD, OpenBSD, ...) ALGLIB uses POSIX Threads -(standard *nix library which is shipped with any POSIX system) -with its threading and synchronization primitives. -It gives ALGLIB unprecedented portability across operating systems and compilers. -ALGLIB does not depend on presence of any custom multithreading library -or compiler support for any multithreading technology. -

    - -

    -If you want to use multithreaded capabilities of ALGLIB, you should: -

    - -
      -
    1. compile it in OS-specific mode (ALGLIB have to know what OS it is running on)
    2. -
    3. tell ALGLIB about number of worker threads to use
    4. -
    5. call multithreaded versions of computational functions
    6. -
    - -

    -Let explain it in more details... +When AE_CPU macro is defined and equals to the AE_INTEL, it enables SIMD support. +ALGLIB will use cpuid instruction to determine SIMD presence at run-time and use SIMD-capable code. +ALGLIB uses SIMD intrinsics which are portable across different compilers and efficient enough for most practical purposes.

    -1. -You should compile ALGLIB in OS-specific mode by #defining either -AE_OS=AE_WINDOWS or AE_OS=AE_POSIX at compile time, depending on OS being used. +If you want to use multithreaded capabilities of commercial version of ALGLIB, +you should compile it in OS-specific mode by #defining either AE_OS=AE_WINDOWS, +AE_OS=AE_POSIX or AE_OS=AE_LINUX (POSIX with Linux-specific extensions) at compile time, +depending on OS being used. Former corresponds to any modern OS (32/64-bit Windows XP and later) from Windows family, -while latter means almost any POSIX-compatible OS. -When compiling on POSIX, do not forget to link ALGLIB with libpthread library. -

    - -

    -2. -ALGLIB automatically determines number of cores on application startup. -On Windows it is done using GetSystemInfo() call. -On POSIX systems ALGLIB performs sysconf(_SC_NPROCESSORS_ONLN) system call. -This system call is supported by all modern POSIX-compatible systems: Solaris, Linux, FreeBSD, NetBSD, OpenBSD. +while latter means almost any POSIX-compatible OS (or any OS from the Linux family). +It applies only to commercial version of ALGLIB. +Open source version is always OS-agnostic, even in the presence of OS-specific definitions.

    -

    -By default, ALGLIB uses all available cores except for one. -Say, on 4-core system it will use three cores - unless being told to use more or less. -It will keep your system responsive during lengthy computations. -Such behavior may be changed with setnworkers() call: -

    +

    4.4 Examples (free and commercial editions)

    -
      -
    • alglib::setnworkers(0) = use all cores
    • -
    • alglib::setnworkers(-1) = leave one core unused
    • -
    • alglib::setnworkers(-2) = leave two cores unused
    • -
    • alglib::setnworkers(+2) = use 2 cores (even if you have more)
    • -
    +

    4.4.1 Introduction

    -You may want to specify maximum number of worker threads during compile time -by means of preprocessor definition AE_NWORKERS=N. -You can add this definition to compiler command line or change corresponding project settings in your IDE. -Here N can be any positive number. -ALGLIB will use exactly N worker threads, unless being told to use less by setnworkers() call. -

    - -

    -Some old POSIX-compatible operating systems do not support sysconf(_SC_NPROCESSORS_ONLN) system call -which is required in order to automatically determine number of active cores. -On these systems you should specify number of cores manually at compile time. -Without it ALGLIB will run in single-threaded mode. +In this section we'll consider different compilation scenarios for free and commercial versions of ALGLIB - +from simple platform-agnostic compilation to compiling/linking with MKL extensions.

    -3. -When you use commercial edition of ALGLIB, -you may choose between serial and multithreaded versions of SMP-capable functions: -

    -
      -
    • serial version works as usual, in the context of the calling thread
    • -
    • multithreaded version (with smp_ prefix in its name) creates (or wakes up) worker threads, -inserts task in the worker queue, and waits for completion of the task. -All processing is done in context of worker thread(s).
    • -
    -

    -You should carefully decide what version of function to use. -Starting/stopping worker thread costs tens of thousands of CPU cycles. -Thus you won't get multithreading speedup on small computational problems. +We assume that you unpacked ALGLIB distribution in the current directory and saved here demo.cpp file, +whose code is given below. Thus, in the current directory you should have exactly one file (demo.cpp) and +exactly one subdirectory (cpp folder with ALGLIB distribution).

    -

    5.3.1 SMT (CMT/hyper-threading) issues

    - -

    -Simultaneous multithreading (SMT) also known as Hyper-threading (Intel) -and Cluster-based Multithreading (AMD) -is a CPU design where several (usually two) logical cores share resources of one physical core. -Say, on dual-core system with 2x HT scale factor you will see 4 logical cores. -Each pair of these 4 cores, however, share same hardware resources. -Thus, you may get only marginal speedup when running highly optimized software which fully utilizes CPU resources. -

    +

    4.4.2 Compiling under Windows

-Say, if one thread occupies floating-point unit, -another thread on the same physical core may work with integer numbers at the same time without any performance penalties. -In this case you may get some speedup due to having additional cores. -But if both threads keep FPU unit 100% busy, they won't get any multithreaded speedup. +The file listing below contains a very basic program which uses ALGLIB to perform matrix-matrix multiplication. +After that, the program evaluates the performance of GEMM (the function being called) and prints the result to the console. +We'll show how the performance of this program increases as we add more and more sophisticated compiler options.

    -

    -So, if 2 math-intensive threads are dispatched by OS scheduler to different physical cores, -you will get 2x speedup due to use of multithreading. -But if these threads are dispatched to different logical cores - but same physical core - you won't get any speedup at all! -One physical core will be 100% busy, and another one will be 100% idle. -From the other side, if you start four threads instead of two, your system will be 100% utilized independently of thread scheduling details. -

    +
    +demo.cpp (WINDOWS EXAMPLE)
    +
    +
    +#include <stdio.h>
    +#include <windows.h>
    +#include "LinAlg.h"
     
    -

    -Let we stress it one more time - multithreading speedup on SMT systems is highly dependent on number of threads you are running and decisions made by OS scheduler. -It is not 100% deterministic! -With "true SMP" when you run 2 threads, you get 2x speedup (or 1.95, or 1.80 - it depends on algorithm, but this factor is always same). -With SMT when you run 2 threads you may get your 2x speedup - or no speedup at all. -Modern OS schedulers do a good job on single-socket hardware, -but even in this "simple" case they give no guarantees of fair distribution of hardware resources. -And things become a bit tricky when you work with multi-socket hardware. -On SMT systems the only guaranteed way to 100% utilize your CPU is to create as many worker threads as there are logical cores. -In this case OS scheduler has no chance to make its work in a wrong way. -

    +double counter() +{ + return 0.001*GetTickCount(); +} -

    5.4 Linking with Intel MKL

    +int main() +{ + alglib::real_2d_array a, b, c; + int n = 2000; + int i, j; + double timeneeded, flops; + + // Initialize arrays + a.setlength(n, n); + b.setlength(n, n); + c.setlength(n, n); + for(i=0; i<n; i++) + for(j=0; j<n; j++) + { + a[i][j] = alglib::randomreal()-0.5; + b[i][j] = alglib::randomreal()-0.5; + c[i][j] = 0.0; + } + + // Set global threading settings (applied to all ALGLIB functions); + // default is to perform serial computations, unless parallel execution + // is activated. Parallel execution tries to utilize all cores; this + // behavior can be changed with alglib::setnworkers() call. + alglib::setglobalthreading(alglib::parallel); + + // Perform matrix-matrix product. + flops = 2*pow((double)n, (double)3); + timeneeded = counter(); + alglib::rmatrixgemm( + n, n, n, + 1.0, + a, 0, 0, 0, + b, 0, 0, 1, + 0.0, + c, 0, 0); + timeneeded = counter()-timeneeded; + + // Evaluate performance + printf("Performance is %.1f GFLOPS\n", (double)(1.0E-9*flops/timeneeded)); + + return 0; +} -

    5.4.1 Using lightweight Intel MKL supplied by ALGLIB Project

    +

    -Commercial edition of ALGLIB includes MKL extensions - -special lightweight distribution of Intel MKL, highly optimized numerical library from Intel - and precompiled ALGLIB-MKL interface libraries. -Linking your programs with MKL extensions allows you to run ALGLIB with maximum performance. -

    - -

-Current version of ALGLIB features Windows-only MKL extensions, -but in future ALGLIB releases we will introduce MKL extensions for Linux systems. +Examples below cover Windows compilation from command line with MSVC. +It is very straightforward to adapt them to compilation from MSVC IDE - or to other compilers. +We assume that you already called %VCINSTALLDIR%\bin\amd64\vcvars64.bat batch file +which loads 64-bit build environment (or its 32-bit counterpart). +We also assume that current directory is clean before example is executed +(i.e. it has ONLY demo.cpp file and cpp folder). +We used a 3.2 GHz 4-core CPU for this test.

    -Unlike the rest of the library, MKL extensions are distributed in binary-only form. -ALGLIB itself is still distributed in source code form, but Intel MKL and ALGLIB-MKL interface are distributed as precompiled dynamic/static libraries. -We can not distribute them in source because of license restrictions associated with Intel MKL. -Also due to license restrictions we can not give you direct access to MKL functionality. -You may use MKL to accelerate ALGLIB - without paying for MKL license - but you may not call its functions directly. -It is technically possible, but strictly prohibited by both MKL's EULA and ALGLIB License Agreement. -If you want to work with MKL, you should buy separate license from Intel. +First example covers platform-agnostic compilation without optimization settings - the most simple way to compile ALGLIB. +This step is same in both open source and commercial editions. +However, in platform-agnostic mode ALGLIB is unable to use all performance related features present in commercial edition.

-MKL extensions are located in the /cpp/mkl-windows subdirectory of the ALGLIB distribution. -This directory includes: +We start by copying all cpp and h files to the current directory, +then we will compile them along with demo.cpp. +In this and following examples we will omit compiler output for the sake of simplicity.

    -
      -
    • mkl4alglib_32.lib - 32-bit import library for Intel MKL
    • -
    • mkl4alglib_32.dll - 32-bit external DLL with Intel MKL
    • -
    • mkl4alglib_64.lib - 64-bit import library for Intel MKL
    • -
    • mkl4alglib_64.dll - 64-bit external DLL with Intel MKL
    • -
    +
    +OS-agnostic mode, no compiler optimizations
    +
    +
    +> copy cpp\src\*.* .
    +> cl /I. /EHsc /Fedemo.exe *.cpp
    +> demo.exe
    +Performance is 0.7 GFLOPS
    +

-In order to activate MKL extensions you should: +Well, 0.7 GFLOPS is not very impressive for a 3.2GHz CPU... Let's add /Ox to compiler parameters.

    -
      -
    • -compile ALGLIB source files with following preprocessor symbols defined on at global level:
      -    AE_OS=AE_WINDOWS (to activate multithreading capabilities)
      -    AE_CPU=AE_INTEL (to use SSE instructions provided by x86/x64 CPU's)
      -    AE_MKL (to use Intel MKL functions in ALGLIB)
      -If you compile from command line, you may write "/DAE_OS=AE_WINDOWS /DAE_CPU=AE_INTEL /DAE_MKL" -
    • -
    • -depending on CPU architecture, choose 32-bit or 64-bit LIB file - -mkl4alglib_32/64.lib - and link it with your application. -
    • -
    • -place mkl4alglib_32/64.dll into directory where it can be found during application startup -(usually - application dir) -
    • -
    +
    +OS-agnostic mode, /Ox optimization
    +
    +
    +> cl /I. /EHsc /Fedemo.exe /Ox *.cpp
    +> demo.exe
    +Performance is 0.9 GFLOPS
    +

    -Examples on linking from command line can be found in the next section. +Still not impressed. Let's turn on optimizations for x86 architecture: define AE_CPU=AE_INTEL. +This option provides some speed-up in both free and commercial editions of ALGLIB.

    -

    5.4.2 Using your own installation of Intel MKL

    +
    +OS-agnostic mode, ALGLIB knows it is x86/x64
    +
    +
    +> cl /I. /EHsc /Fedemo.exe /Ox /DAE_CPU=AE_INTEL *.cpp
    +> demo.exe
    +Performance is 4.5 GFLOPS
    +

    -If you bought separate license for Intel MKL, and want to use your own -installation of MKL - and not our lightweight distribution - then you -should compile ALGLIB as it was told in the previous section, with all necessary preprocessor definitions. -But instead of linking with mkl4alglib dynamic library, -you should add to your project mkl4alglib.c file from mkl-interface directory -and compile it (as C file) along with the rest of ALGLIB. +It is good, but we have 4 cores - and only one of them was used. +Defining AE_OS=AE_WINDOWS allows ALGLIB to use Windows threads to parallelize execution of some functions. +Starting from this moment, our example applies only to Commercial Edition.

    -

    -This C file implements interface between MKL and ALGLIB. -Having this file in your project and defining AE_MKL preprocessor definition -results in ALGLIB using MKL functions. -

    +
    +ALGLIB knows it is Windows on x86/x64 CPU (COMMERCIAL EDITION)
    +
    +
    +> cl /I. /EHsc /Fedemo.exe /Ox /DAE_CPU=AE_INTEL /DAE_OS=AE_WINDOWS *.cpp
    +> demo.exe
    +Performance is 16.0 GFLOPS
    +

    -However, this C file is just interface! -It is your responsibility to make sure that C/C++ compiler can find MKL headers, -and appropriate MKL static/dynamic libraries are linked to your application. -

    - -

-If you link ALGLIB with your own installation if Intel MKL, -you may do so on any OS where MKL works - Windows or Linux. +Not bad. +And now we are ready for the final test - linking with MKL extensions.

    -

    5.5 Examples - compiling commercial edition of ALGLIB

    - -

    5.5.1 Introduction

    -

    -In this section we'll consider different compilation scenarios for commercial version of ALGLIB - -from simple platform-agnostic compilation to linking with MKL extensions. +Linking with MKL extensions differs a bit from standard way of linking with ALGLIB. +ALGLIB itself is compiled with one more preprocessor definition: we define AE_MKL symbol. +We also link ALGLIB with appropriate (32-bit or 64-bit) alglib???_??mkl.lib static library, +which is an import library for special lightweight MKL distribution, shipped with ALGLIB. +We also should copy to current directory appropriate alglib???_??mkl.dll binary file which contains Intel MKL.

    +
    +Linking with MKL extensions (COMMERCIAL EDITION)
    +
    +
    +> copy cpp\addons-mkl\alglib*64mkl.lib .
    +> copy cpp\addons-mkl\alglib*64mkl.dll .
    +> cl /I. /EHsc /Fedemo.exe /Ox /DAE_CPU=AE_INTEL /DAE_OS=AE_WINDOWS /DAE_MKL *.cpp alglib*64mkl.lib
    +> demo.exe
    +Performance is 33.1 GFLOPS
    +
    +

    -We assume that you unpacked ALGLIB distribution in the current directory and saved here demo.cpp file, -whose code is given below. Thus, in the current directory you should have exactly one file (demo.cpp) and -exactly one subdirectory (cpp folder with ALGLIB distribution). +From 0.7 GFLOPS to 33.1 GFLOPS - you may see that commercial version of ALGLIB is really worth it!

    -

    5.5.2 Compiling under Windows

    +

    4.4.3 Compiling under Linux

The file listing below contains a very basic program which uses ALGLIB to perform matrix-matrix multiplication. @@ -813,16 +715,23 @@

    -demo.cpp
    +demo.cpp (LINUX EXAMPLE)
     
     #include <stdio.h>
    -#include <windows.h>
    +#include <sys/time.h>
     #include "LinAlg.h"
     
     double counter()
     {
    -    return 0.001*GetTickCount();
    +    struct timeval now;
    +    alglib_impl::ae_int64_t r, v;
    +    gettimeofday(&now, NULL);
    +    v = now.tv_sec;
    +    r = v*1000;
    +    v = now.tv_usec/1000;
    +    r = r+v;
    +    return 0.001*r;
     }
     
     int main()
    @@ -844,16 +753,16 @@
                 c[i][j] = 0.0;
             }
         
    -    // Set number of worker threads: "4" means "use 4 cores".
    -    // This line is ignored if AE_OS is UNDEFINED.
    -    alglib::setnworkers(4);
    +    // Set global threading settings (applied to all ALGLIB functions);
    +    // default is to perform serial computations, unless parallel execution
    +    // is activated. Parallel execution tries to utilize all cores; this
    +    // behavior can be changed with alglib::setnworkers() call.
    +    alglib::setglobalthreading(alglib::parallel);
         
         // Perform matrix-matrix product.
    -    // We call function with "smp_" prefix, which means that ALGLIB
    -    // will try to execute it in parallel manner whenever it is possible.
         flops = 2*pow((double)n, (double)3);
         timeneeded = counter();
    -    alglib::smp_rmatrixgemm(
    +    alglib::rmatrixgemm(
             n, n, n,
             1.0,
             a, 0, 0, 0,
    @@ -871,17 +780,15 @@
     

    -Examples below cover Windows compilation from command line with MSVC. -It is very straightforward to adapt them to compilation from MSVC IDE - or to another compilers. -We assume that you already called %VCINSTALLDIR%\bin\amd64\vcvars64.bat batch file -which loads 64-bit build environment (or its 32-bit counterpart). -We also assume that current directory is clean before example is executed +Examples below cover x64 Linux compilation from command line with GCC. +We assume that current directory is clean before example is executed (i.e. it has ONLY demo.cpp file and cpp folder). -We used 3.2 GHz 4-core CPU for this test. +We used 2.3 GHz 2-core Skylake CPU with 2x Hyperthreading enabled for this test.

    First example covers platform-agnostic compilation without optimization settings - the most simple way to compile ALGLIB. +This step is same in both open source and commercial editions. However, in platform-agnostic mode ALGLIB is unable to use all performance related features present in commercial edition.

    @@ -894,88 +801,99 @@
     OS-agnostic mode, no compiler optimizations
     
    -
    -> copy cpp\src\*.* .
    -> cl /I. /EHsc /Fedemo.exe *.cpp
    -> demo.exe
    -Performance is 0.7 GFLOPS
    +
    +> cp cpp/src/* .
    +> g++ -I. -o demo.out *.cpp
    +> ./demo.out
    +Performance is 0.9 GFLOPS
     

    -Well, 0.7 GFLOPS is not very impressing for a 3.2GHz CPU... Let's add /Ox to compiler parameters. +Let's add -O3 to compiler parameters.

    -OS-agnostic mode, /Ox optimization
    +OS-agnostic mode, -O3 optimization
     
    -
    -> copy cpp\src\*.* .
    -> cl /I. /EHsc /Fedemo.exe /Ox *.cpp
    -> demo.exe
    -Performance is 0.9 GFLOPS
    +
    +> g++ -I. -o demo.out -O3 *.cpp
    +> ./demo.out
    +Performance is 2.8 GFLOPS
     

-Still not impressed. Let's turn on optimizations for x86 architecture: define AE_CPU=AE_INTEL. +Better, but still not impressive. Let's turn on optimizations for x86 architecture: define AE_CPU=AE_INTEL. +This option provides some speed-up in both free and commercial editions of ALGLIB.

     OS-agnostic mode, ALGLIB knows it is x86/x64
     
    -
    -> copy cpp\src\*.* .
    -> cl /I. /EHsc /Fedemo.exe /Ox /DAE_CPU=AE_INTEL *.cpp
    -> demo.exe
    -Performance is 4.5 GFLOPS
    +
    +> g++ -I. -o demo.out -O3 -DAE_CPU=AE_INTEL *.cpp
    +> ./demo.out
    +Performance is 5.0 GFLOPS
     

-It is good, but we have 4 cores - and only one of them was used. -Defining AE_OS=AE_WINDOWS allows ALGLIB to use Windows threads to parallelize execution of some functions. +It is good, but we have 4 cores (in fact, 2 cores - it is a 2-way hyperthreaded system) and only one of them was used. +Defining AE_OS=AE_POSIX allows ALGLIB to use POSIX threads to parallelize execution of some functions. +You should also specify the -pthread flag to link with the pthreads standard library. +Starting from this moment, our example applies only to Commercial Edition.

    -ALGLIB knows it is Windows on x86/x64 CPU
    +ALGLIB knows it is POSIX OS on x86/x64 CPU (COMMERCIAL EDITION)
     
    -
    -> copy cpp\src\*.* .
    -> cl /I. /EHsc /Fedemo.exe /Ox /DAE_CPU=AE_INTEL /DAE_OS=AE_WINDOWS *.cpp
    -> demo.exe
    -Performance is 16.0 GFLOPS
    +
    +> g++ -I. -o demo.out -O3 -DAE_CPU=AE_INTEL -DAE_OS=AE_POSIX -pthread *.cpp
    +> ./demo.out
    +Performance is 9.0 GFLOPS
     

-Not bad. +Not bad. You may notice that performance growth was ~2x, not 4x. +The reason is that we tested ALGLIB on a hyperthreaded system: although we have 4 logical cores, +they share computational resources of just 2 physical cores. And now we are ready for the final test - linking with MKL extensions.

    Linking with MKL extensions differs a bit from standard way of linking with ALGLIB. ALGLIB itself is compiled with one more preprocessor definition: we define AE_MKL symbol. -We also link ALGLIB with appropriate (32-bit or 64-bit) mkl4alglib static library, -which is import library for special lightweight MKL distribution, shipped with ALGLIB for no additional price. -We also should copy to current directory appropriate mkl4alglib DLL file which contains Intel MKL. +We also link ALGLIB with appropriate alglib???_??mkl.so shared library, +which contains special lightweight MKL distribution shipped with ALGLIB. +

    + +

    +We should note that on typical Linux system shared libraries are not loaded from current directory by default. +Either you install them into one of the system directories, +or use some way to tell linker/loader that you want to load shared library from some specific directory. +For our example we choose to update LD_LIBRARY_PATH environment variable.

    -Linking with MKL extensions
    +Linking with MKL extensions (COMMERCIAL EDITION, relevant for ALGLIB 3.13)
     
    -
    -> copy cpp\src\*.* .
    -> copy cpp\mkl-windows\mkl4alglib_64.lib .
    -> copy cpp\mkl-windows\mkl4alglib_64.dll .
    -> cl /I. /EHsc /Fedemo.exe /Ox /DAE_CPU=AE_INTEL /DAE_OS=AE_WINDOWS /DAE_MKL demo.cpp mkl4alglib_64.lib
    -> demo.exe
    -Performance is 33.1 GFLOPS
    +
    +> cp cpp/addons-mkl/libalglib*64mkl.so .
    +> ls *.so
    +libalglib313_64mkl.so
    +> g++ -I. -o demo.out -O3 -DAE_CPU=AE_INTEL -DAE_OS=AE_POSIX -pthread -DAE_MKL -L. *.cpp -lalglib313_64mkl
    +> LD_LIBRARY_PATH=.
    +> export LD_LIBRARY_PATH
    +> ./demo.out
    +Performance is 33.8 GFLOPS
     

    -From 0.7 GFLOPS to 33.1 GFLOPS - you may see that commercial version of ALGLIB is really worth it! +Final result: from 0.9 GFLOPS to 33.8 GFLOPS!

    -

    6 Using ALGLIB

    -

    6.1 Thread-safety

    +

    5 Using ALGLIB

    + +

    5.1 Thread-safety

    Both open source and commercial versions of ALGLIB are 100% thread-safe @@ -995,12 +913,12 @@ because output is written to distinct arrays Y. Thus, you may want to process these vectors from parallel threads.

    -But it is not read-only operation, even if it looks like this! +But it is not read-only operation, even if it looks like that! Neural network object NET allocates internal temporary buffers, which are modified by neural processing functions. Thus, sharing one instance of neural network between two threads is thread-unsafe!
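To make this rule concrete, here is an added sketch (not from the upstream manual): each thread works with its own multilayerperceptron instance, so the internal temporary buffers are never shared. It assumes that passing the wrapper object by value yields an independent copy of the network and that mlpcreate1/mlpprocess from dataanalysis.h are used; on POSIX systems compile with -pthread.

thread_safety_sketch.cpp (ILLUSTRATIVE SKETCH)

#include <thread>
#include <vector>
#include "dataanalysis.h"   // multilayerperceptron, mlpcreate1, mlpprocess

// Each worker receives its OWN network object (passed by value), because
// mlpprocess modifies the network's internal buffers during processing.
static void worker(alglib::multilayerperceptron net, const std::vector<double> &inputs)
{
    alglib::real_1d_array x, y;
    x.setlength(1);
    for(double v : inputs)
    {
        x[0] = v;
        alglib::mlpprocess(net, x, y);   // safe: 'net' is private to this thread
    }
}

int main()
{
    alglib::multilayerperceptron net;
    alglib::mlpcreate1(1, 5, 1, net);    // 1 input, 5 hidden neurons, 1 output

    std::vector<double> part1{0.1, 0.2}, part2{0.3, 0.4};
    std::thread t1(worker, net, part1);  // each thread gets a copy - never the shared instance
    std::thread t2(worker, net, part2);
    t1.join();
    t2.join();
    return 0;
}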

    -

    6.2 Global definitions

    +

    5.2 Global definitions

    ALGLIB defines several conditional symbols (all start with "AE_" which means "ALGLIB environment") and two namespaces: @@ -1008,12 +926,14 @@

    -Although this manual mentions both alglib_impl and alglib namespaces, only alglib namespace should be used by you. +Although this manual mentions both alglib_impl and alglib namespaces, +only alglib namespace should be used by you. It contains user-friendly C++ interface with automatic memory management, exception handling and all other nice features. -alglib_impl is less user-friendly, is less documented, and it is too easy to crash your system or cause memory leak if you use it directly. +alglib_impl is less user-friendly, is less documented, +and it is too easy to crash your system or cause memory leak if you use it directly.

    -

    6.3 Datatypes

    +

    5.3 Datatypes

    ALGLIB (ap.h header) defines several "basic" datatypes (types which are used by all packages) and many package-specific datatypes. "Basic" datatypes are: @@ -1042,7 +962,7 @@

  • "object-like" classes which have no public fields. You should use ALGLIB functions to work with them.
  • -

    6.4 Constants

    +

    5.4 Constants

    The most important constants (defined in the ap.h header) from ALGLIB namespace are: @@ -1058,7 +978,7 @@

  • alglib::fp_neginf - negative infinity
  • -

    6.5 Functions

    +

    5.5 Functions

    The most important "basic" functions from ALGLIB namespace (ap.h header) are: @@ -1081,7 +1001,7 @@

  • alglib::fp_isfinite - checks whether number is finite value (possibly subnormalized)
  • -

    6.6 Working with vectors and matrices

    +

    5.6 Working with vectors and matrices

    ALGLIB (ap.h header) supports matrixes and vectors (one-dimensional and two-dimensional arrays) of variable size, with numeration starting from zero. @@ -1160,8 +1080,25 @@

    +You can also attach real vector/matrix object to already allocated double precision array +(attaching to boolean/integer/complex arrays is not supported). +In this case, no actual data is copied, and attached vector/matrix object becomes a read/write proxy for external array. +

    + +
    +alglib::real_1d_array r1;
    +double a1[] = {2, 3};
    +r1.attach_to_ptr(2,a1);
    +
    +alglib::real_2d_array r2;
    +double a2[] = {11, 12, 13, 21, 22, 23};
+r2.attach_to_ptr(2,3,a2);
    +
    + +
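A short self-contained sketch (added for illustration) of the read/write proxy behaviour described above; the only extra rule to keep in mind is that the external array must outlive the attached wrapper, since no data is copied.

attach_sketch.cpp (ILLUSTRATIVE SKETCH)

#include <cstdio>
#include "ap.h"

int main()
{
    double a1[] = {2, 3};          // externally owned storage
    alglib::real_1d_array r1;
    r1.attach_to_ptr(2, a1);       // no copy: r1 is a read/write proxy for a1
    r1[1] = 7.5;                   // a write through the proxy...
    printf("%.1f\n", a1[1]);       // ...is visible in a1: prints 7.5
    return 0;
}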

    To access the array elements, an overloaded operator() or operator[] can used. -That is, the code addressing the element of array a with indexes [i,j] can look like a(i,j) or a[i][j]. +That is, the code addressing the element of array a with indexes [i,j] can look like +a(i,j) or a[i][j].

    @@ -1209,7 +1146,7 @@
     printf("%ld\n", (long)b.cols());
     
    -

    6.7 Using functions: 'expert' and 'friendly' interfaces

    +

    5.7 Using functions: 'expert' and 'friendly' interfaces

    Most ALGLIB functions provide two interfaces: 'expert' and 'friendly'. What is the difference between two? When you use 'friendly' interface, ALGLIB: @@ -1320,7 +1257,7 @@

    -

    6.8 Handling errors

    +

    5.8 Handling errors

    ALGLIB uses two error handling strategies: @@ -1362,13 +1299,15 @@

    -First error handling strategy (error codes) is used to report "frequent" errors, which can occur during normal execution of user program. -Second error handling strategy (exceptions) is used to report "rare" errors which are result of serious flaws in your program (or ALGLIB) - +First error handling strategy (error codes) is used to report "frequent" errors +which can occur during normal execution of user program. +Second error handling strategy (exceptions) is used to report "rare" errors +which are result of serious flaws in your program (or ALGLIB) - infinities/NAN's in the inputs, inconsistent inputs, etc.
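As an added illustration of the two strategies (a sketch, not part of the upstream manual): 'rare' errors surface as alglib::ap_error exceptions, while 'frequent' conditions are reported through output arguments such as info/rep. It assumes that the friendly rmatrixinverse overload from linalg.h rejects a non-square matrix by throwing.

error_handling_sketch.cpp (ILLUSTRATIVE SKETCH)

#include <cstdio>
#include "linalg.h"                      // rmatrixinverse; alglib::ap_error comes via ap.h

int main()
{
    try
    {
        alglib::real_2d_array a;
        a = "[[1,2],[3,4],[5,6]]";                        // 3x2 - deliberately not square
        alglib::ae_int_t info;
        alglib::matinvreport rep;
        alglib::rmatrixinverse(a, info, rep);             // inconsistent input -> exception (assumed)
        printf("info=%d\n", int(info));                   // 'frequent' errors would be reported here
    }
    catch(alglib::ap_error &e)
    {
        printf("ALGLIB exception: %s\n", e.msg.c_str());  // human-readable description of the problem
    }
    return 0;
}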

    -

    6.9 Working with Level 1 BLAS functions

    +

    5.9 Working with Level 1 BLAS functions

    ALGLIB (ap.h header) includes following Level 1 BLAS functions: @@ -1680,7 +1619,7 @@ alglib::complex alpha);

    -

    6.10 Reading data from CSV files

    +

    5.10 Reading data from CSV files

    ALGLIB (ap.h header) has alglib::read_csv() function @@ -1703,82 +1642,523 @@

    -

    7 Advanced topics

    -

    7.1 Testing ALGLIB

    +

    6 Working with commercial version

    + +

    6.1 Benefits of commercial version

    -There are two test suites in ALGLIB: computational tests and interface tests. -Computational tests are located in /tests/test_c.cpp. -They are focused on numerical properties of algorithms, stress testing and "deep" tests (large automatically generated problems). -They require significant amount of time to finish (tens of minutes). +Commercial version of ALGLIB for C++ features four important improvements over open source one: +

    + +
      +
    • +License. +Commercial license used by ALGLIB is friendly to closed source applications. +Unlike GPL, it does not require you to open source your application. +Thus, almost any commercial software developer is interested in obtaining commercial license. +
    • +
• +Low-level optimizations. +Commercial version of ALGLIB includes SIMD-optimized versions of many computationally intensive functions. +This makes it possible to increase performance on Intel/AMD platforms while still being able to use the software on non-x86 CPU's. +
    • +
    • +Multithreading. +Commercial version of ALGLIB can utilize multicore capabilities of modern CPU's. +Large computational problems can be automatically split between different cores. +ALGLIB uses its own multithreading framework which does not depend on vendor/compiler support for technologies like OpenMP/MPI/... +It gives ALGLIB unprecedented portability across operating systems and compilers. +
    • +
    • +Integrated Intel MKL. +Commercial version of ALGLIB includes special MKL extensions - +special lightweight distribution of Intel MKL, high-performance numerical analysis library, accompanied by ALGLIB-MKL interface. +We obtained license from Intel which allows us to integrate MKL into ALGLIB distribution. +Linking with MKL accelerates many ALGLIB functions, +however due to license restrictions you can not use MKL directly (i.e. bypass ALGLIB interface between your program and MKL). +
    • +
    + +

    6.2 Working with SIMD support (Intel/AMD users)

    + +

    +ALGLIB for C++ can utilize SIMD instructions supported by Intel and AMD processors. +This feature is optional and must be explicitly turned on during compile-time. +If you do not activate it, ALGLIB will use generic C code, without any processor-specific assembly/intrinsics.

-Interface tests are located in /tests/test_i.cpp. -These tests are focused on ability to correctly pass data between computational core and caller, ability to detect simple problems in inputs, -and on ability to at least compile ALGLIB with your compiler. -They are very fast (about a minute to finish including compilation time). +Thus, if you turn on this feature, your code will run faster on x86_32 and x86_64 processors, +but it will be unportable to non-x86 platforms (and the Intel MIC platform, which is not exactly x86!). +On the other hand, if you do not activate this feature, your code will be portable to almost any modern CPU (SPARC, ARM, ...).

-Running test suite is easy - just +In order to turn on x86-specific optimizations, +you should define the AE_CPU=AE_INTEL preprocessor symbol at the global level. +This tells ALGLIB to use SIMD intrinsics supported by the GCC, MSVC and Intel compilers. +Additionally, you should tell the compiler to generate SIMD-capable code. +This can be done in the project settings of your IDE or on the command line:

    + +
    +
    +GCC example:
    +> g++ -msse2 -I. -DAE_CPU=AE_INTEL *.cpp -lm
    +
    +MSVC example:
    +> cl /I. /EHsc /DAE_CPU=AE_INTEL *.cpp
    +
    +
    + +

    6.3 Using multithreading

    + +

    6.3.1 General information

    + +

    +Commercial version of ALGLIB includes out-of-the-box support for multithreading. +Many (not all) computationally intensive problems can be solved in multithreaded mode. +You should read comments on specific ALGLIB functions to determine what can be multithreaded and what can not. +

    + +

+ALGLIB does not depend on vendor/compiler support for technologies like OpenMP/MPI/... +Under Windows, ALGLIB uses OS threads and a custom synchronization framework. +Under POSIX-compatible OSes (Solaris, Linux, FreeBSD, NetBSD, OpenBSD, ...) ALGLIB uses POSIX Threads +(a standard *nix library which is shipped with any POSIX system) +with its threading and synchronization primitives. +This gives ALGLIB unprecedented portability across operating systems and compilers. +ALGLIB does not depend on the presence of any custom multithreading library +or on compiler support for any multithreading technology. +

    + +

+If you want to use the multithreaded capabilities of ALGLIB, you should:

  -
1. compile one of these files (test_c.cpp or test_i.cpp) along with the rest of the library
2. launch executable you will get. It may take from several seconds (interface tests) to several minutes (computational tests) to get final results
  +
1. compile it in OS-specific mode (ALGLIB has to know what OS it is running on)
2. activate multithreading at the global level with an alglib::setglobalthreading() call, or enable it on a per-function basis by passing alglib::parallel to the specific computational function
3. optionally, tell ALGLIB how many worker threads to use (the default is to utilize all cores)

-If you want to be sure that ALGLIB will work with some sophisticated optimization settings, set corresponding flags during compile time. -If your compiler/system are not in the list of supported ones, we recommend you to run both test suites. But if you are running out of time, run at least test_i.cpp. +Let us explain this in more detail...

    +

+1. +You should compile ALGLIB in OS-specific mode by #defining either +AE_OS=AE_WINDOWS or AE_OS=AE_POSIX +(or AE_OS=AE_LINUX, which means "POSIX with Linux extensions") at compile time, +depending on the OS being used. +The former corresponds to any modern OS from the Windows family (32/64-bit Windows XP and later), +while the latter two cover almost any POSIX-compatible OS or any OS from the Linux family. +When compiling on POSIX/Linux, do not forget to link ALGLIB with the libpthread library. +
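+For illustration, a minimal GCC command line for a POSIX build (assuming that all ALGLIB .cpp sources sit in the current directory; adjust paths and flags to your own layout):
+
+> g++ -I. -DAE_OS=AE_POSIX *.cpp -lm -lpthread -o myprog
+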

    +

+2. +By default, ALGLIB is configured to perform all calculations in a serial manner. +Parallel execution can be manually enabled at either the global or the per-function level. +The former means that all ALGLIB functions will be allowed to parallelize themselves at their discretion; +the latter enables parallelism only for the specific calls you mark. +Similarly, you can enable global parallelism but selectively disable it for specific function calls. +

    -
    -

    8 ALGLIB packages and subpackages

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    -

    8.1 AlglibMisc package

    -
    hqrnd High quality random numbers generator
    nearestneighbor Nearest neighbor search: approximate and exact
    xdebug Debug functions to test ALGLIB interface generator
     
    -

    8.2 DataAnalysis package

    -
    bdss Basic dataset functions
    clustering Clustering functions (hierarchical, k-means, k-means++)
    datacomp Backward compatibility functions
    dforest Decision forest classifier (regression model)
    filters Different filters used in data analysis
    lda Linear discriminant analysis
    linreg Linear models
    logit Logit models
    mcpd Markov Chains for Population/proportional Data
    mlpbase Basic functions for neural networks
    mlpe Basic functions for neural ensemble models
    mlptrain Neural network training
    pca Principal component analysis
     
    -

    8.3 DiffEquations package

    -
    odesolver Ordinary differential equation solver
     
    -

    8.4 FastTransforms package

    -
    conv Fast real/complex convolution
    corr Fast real/complex cross-correlation
    fft Real/complex FFT
    fht Real Fast Hartley Transform
     
    -

    8.5 Integration package

    +

+Global parallelism is enabled by an alglib::setglobalthreading(alglib::parallel) call and +disabled by an alglib::setglobalthreading(alglib::serial) call. +Function-level parallelism can be enabled (or disabled, if the global default is to parallelize) +by adding alglib::parallel (or alglib::serial) to the end of the parameter list +of the specific ALGLIB function being called, as shown in the sketch below. +
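+The following minimal sketch shows where these settings go; it assumes a commercial, OS-aware build as described above (AE_OS defined, libpthread linked on POSIX). The 2x2 matrices are far too small to benefit from parallelism and only illustrate parameter placement.

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    real_2d_array a = "[[2,1],[1,3]]";
    real_2d_array b = "[[2,1],[0,1]]";
    real_2d_array c = "[[0,0],[0,0]]";

    // allow every ALGLIB function to parallelize itself at its discretion
    setglobalthreading(parallel);

    // optionally limit the worker pool (see the setnworkers() values below)
    setnworkers(2);

    // per-call override: the trailing xparams argument forces parallel
    // (or serial) execution for this particular call only
    rmatrixgemm(2, 2, 2, 1.0, a, 0, 0, 0, b, 0, 0, 0, 0.0, c, 0, 0, parallel);
    printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [[4,3],[2,4]]
    return 0;
}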

    + +

+Enabling parallelism does not guarantee that ALGLIB will parallelize its computations. +Small problems (say, products of 64x64 matrices) cannot be efficiently parallelized. +ALGLIB will automatically decide whether your problem is large enough for efficient parallelization. +

    + +

+3. +ALGLIB automatically determines the number of cores at application startup. +On Windows this is done using the GetSystemInfo() call. +On POSIX systems ALGLIB performs the sysconf(_SC_NPROCESSORS_ONLN) system call. +This system call is supported by all modern POSIX-compatible systems: Solaris, Linux, FreeBSD, NetBSD, OpenBSD. +

    + +

    +By default, ALGLIB uses all available cores (when told to parallelize calculations). +Such behavior may be changed with setnworkers() call: +

    + +
      +
    • alglib::setnworkers(0) = use all cores
    • +
    • alglib::setnworkers(-1) = leave one core unused
    • +
    • alglib::setnworkers(-2) = leave two cores unused
    • +
    • alglib::setnworkers(+2) = use 2 cores (even if you have more)
    • +
    + +

+You may want to specify the maximum number of worker threads at compile time +by means of the preprocessor definition AE_NWORKERS=N. +You can add this definition to the compiler command line or change the corresponding project settings in your IDE. +Here N can be any positive number. +ALGLIB will use exactly N worker threads, unless told to use fewer by a setnworkers() call. +
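+For example, a hypothetical command line that caps the worker pool at four threads at compile time:
+
+> g++ -I. -DAE_OS=AE_POSIX -DAE_NWORKERS=4 *.cpp -lm -lpthread -o myprog
+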

    + +

+Some old POSIX-compatible operating systems do not support the sysconf(_SC_NPROCESSORS_ONLN) system call, +which is required in order to automatically determine the number of active cores. +On these systems you should specify the number of cores manually at compile time; +without it, ALGLIB will run in single-threaded mode. +

    + +

    6.3.2 SMT (CMT/hyper-threading) issues

    + +

+Simultaneous multithreading (SMT), also known as Hyper-Threading (Intel) +and Cluster-based Multithreading (AMD), +is a CPU design where several (usually two) logical cores share the resources of one physical core. +Say, on a dual-core system with a 2x HT scale factor you will see 4 logical cores. +Each pair of these 4 cores, however, shares the same hardware resources. +Thus, you may get only a marginal speedup when running highly optimized software which fully utilizes CPU resources. +

    + +

+Say, if one thread occupies the floating-point unit, +another thread on the same physical core may work with integer numbers at the same time without any performance penalty. +In this case you may get some speedup from the additional logical cores. +But if both threads keep the FPU 100% busy, they won't get any multithreaded speedup. +

    + +

+So, if 2 math-intensive threads are dispatched by the OS scheduler to different physical cores, +you will get a 2x speedup from multithreading. +But if these threads are dispatched to different logical cores - but the same physical core - you won't get any speedup at all! +One physical core will be 100% busy, and the other one will be 100% idle. +On the other hand, if you start four threads instead of two, your system will be 100% utilized independently of thread scheduling details. +

    + +

+Let us stress it one more time: multithreading speedup on SMT systems is highly dependent on the number of threads you are running and on decisions made by the OS scheduler. +It is not 100% deterministic! +With "true SMP", when you run 2 threads you get a 2x speedup (or 1.95x, or 1.80x - it depends on the algorithm, but the factor is always the same). +With SMT, when you run 2 threads you may get your 2x speedup - or no speedup at all. +Modern OS schedulers do a good job on single-socket hardware, +but even in this "simple" case they give no guarantees of fair distribution of hardware resources. +And things become a bit tricky when you work with multi-socket hardware. +On SMT systems the only guaranteed way to utilize 100% of your CPU is to create as many worker threads as there are logical cores. +In this case the OS scheduler has no chance to do its work in the wrong way. +

    + +

    6.4 Linking with Intel MKL

    + +

    6.4.1 Using lightweight Intel MKL supplied by ALGLIB Project

    + +

+The commercial edition of ALGLIB includes MKL extensions - +a special lightweight distribution of Intel MKL (a highly optimized numerical library from Intel) +- and precompiled ALGLIB-MKL interface libraries. +Linking your programs with the MKL extensions allows you to run ALGLIB with maximum performance. +MKL binaries are delivered for the x86/x64 Windows and x64 Linux platforms. +

    + +

+Unlike the rest of the library, the MKL extensions are distributed in binary-only form. +ALGLIB itself is still distributed in source code form, +but Intel MKL and the ALGLIB-MKL interface are distributed as precompiled dynamic/static libraries. +We cannot distribute them in source form because of license restrictions associated with Intel MKL. +Also due to license restrictions we cannot give you direct access to MKL functionality. +You may use MKL to accelerate ALGLIB - without paying for an MKL license - but you may not call its functions directly. +It is technically possible, but strictly prohibited by both MKL's EULA and the ALGLIB License Agreement. +If you want to work with MKL directly, you should obtain a separate license from Intel (as of 2018, free licenses are available). +

    + +

    +MKL extensions are located in the /cpp/addons-mkl subdirectory of the ALGLIB distribution. +This directory includes following files: +

    + +
      +
    • alglib???_32mkl.lib - x86 Windows import library (MSVC) for Intel MKL extensions
    • +
    • alglib???_64mkl.lib - x64 Windows import library (MSVC) for Intel MKL extensions
    • +
    • alglib???_32mkl.dll - x86 Windows binary with Intel MKL inside
    • +
    • alglib???_64mkl.dll - x64 Windows binary with Intel MKL inside
    • +
    • libalglib???_64mkl.so - x64 Linux binary with Intel MKL inside
    • +
    + +

+Here ??? stands for the specific ALGLIB version: 313 for ALGLIB 3.13, and so on. +The files above are just the MKL extensions - ALGLIB itself is not included in these binaries, +and you still have to compile the primary ALGLIB distribution. +

    + +

    +In order to activate MKL extensions you should: +

    + +
      +
    • +#define globally AE_MKL to activate ALGLIB-MKL connection. +
    • +
    • +additionally, #define globally AE_OS=AE_WINDOWS or AE_OS=AE_POSIX to activate multithreading capabilities +
    • +
    • +additionally, #define globally AE_CPU=AE_INTEL to use SIMD instructions provided by x86/x64 CPU's in the rest of ALGLIB +
    • +
    • +Depending on your OS, perform one of the following:
      +    - Windows: choose 32-bit or 64-bit import library alglib???_32/64mkl.lib and link it with your application
      +    - Linux: choose 64-bit SO file alglib???_64mkl.so and link it with your application +
    • +
    • +place DLL/SO binary into directory where it can be found during application startup +(Windows: application dir; Linux: one of the system directories) +
    • +
    + +

    +Several examples of ALGLIB+MKL usage are given in the 'compiling ALGLIB: examples' section. +

    + +

    6.4.2 Using your own installation of Intel MKL

    + +

+If you bought a separate license for Intel MKL and want to use your own +installation of MKL - and not our lightweight distribution - then you +should compile ALGLIB as described in the previous section, with all the necessary preprocessor definitions +(AE_OS=AE_WINDOWS or AE_OS=AE_POSIX, AE_CPU=AE_INTEL and AE_MKL defined). +But instead of linking with the MKL extensions binary, +you should add the alglib2mkl.c file from the addons-mkl directory to your project +and compile it (as a C file) along with the rest of ALGLIB. +

    + +

+This C file implements the interface between MKL and ALGLIB. +Having this file in your project and defining the AE_MKL preprocessor symbol +results in ALGLIB using MKL functions. +

    + +

+However, this C file is just the interface! +It is your responsibility to make sure that the C/C++ compiler can find the MKL headers +and that the appropriate MKL static/dynamic libraries are linked to your application. +
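+For illustration, a hypothetical GCC command line for this setup; the MKL include/library paths and the -lmkl_rt (single dynamic library) link option are assumptions that depend on your particular MKL installation:
+
+> g++ -I. -I/path/to/mkl/include -DAE_OS=AE_POSIX -DAE_CPU=AE_INTEL -DAE_MKL *.cpp alglib2mkl.c -L/path/to/mkl/lib -lmkl_rt -lpthread -lm -o myprog
+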

    + +

    7 Advanced topics

    + +

    7.1 Exception-free mode

    + +

+ALGLIB for C++ can be compiled in exception-free mode, with exceptions +(throw/try/catch constructs) being disabled at the compiler level. +This feature is sometimes used by developers of embedded software. +

    + +

+ALGLIB uses a two-level model of errors: +"expected" errors (like degeneracy of a linear system or inconsistency of linear constraints) +are reported with dedicated completion codes, +and "critical" errors (like malloc failures, unexpected NANs/INFs in the input data and so on) +are reported with exceptions. +The idea is that it is hard to put (and handle) completion codes in every ALGLIB function, +so exceptions are used to signal errors which should never happen under normal circumstances. +

    + +

+Internally, ALGLIB for C++ is implemented as a C++ wrapper around a computational core written in pure C. +Thus, the internals of the ALGLIB core use C-specific methods of error handling - +completion codes and the setjmp/longjmp functions. +These error handling strategies are combined with sophisticated C memory management machinery +which makes sure that not even a byte of dynamic memory is lost when we longjmp to the error handler. +So, the only point where C++ exceptions are actually used is the boundary between the C core and the C++ interface. +

    + +

+If you choose to use exceptions (the default mode), ALGLIB will throw an exception with a short textual description of the situation. +If you choose to work without exceptions, ALGLIB will set a global error flag and silently return from the current +function/constructor/... instead of throwing an exception. +Due to portability issues this error flag is a non-TLS variable, i.e. it is shared between different threads. +So, you can use exception-free error handling only in single-threaded programs - although multithreaded programs won't break, +there is no way to determine which thread caused an "exception without exceptions". +

    + +

    +Exception-free method of reporting critical errors can be activated by #defining two preprocessor symbols at global level: +

    + +
      +
    • AE_NO_EXCEPTIONS - to switch from exception-based to exception-free code
    • +
    • AE_THREADING=AE_SERIAL_UNSAFE - to confirm that you are aware of limitations associated +with exception-free mode (it does not support multithreading)
    • +
    + +

    +We must also note that exception-free mode is incompatible with OS-aware compiling: +you can not have AE_OS=??? defined together with AE_NO_EXCEPTIONS. +

    + +

    +After you #define all the necessary preprocessor symbols, two functions will appear in alglib namespace: +

    + +
      +
    • +bool alglib::get_error_flag(const char **p_msg = NULL), +which returns current error status (true is returned on error), +with optional char** parameter used to get human-readable error message. +
    • +
    • +void alglib::clear_error_flag(), which clears error flag +(ALGLIB functions set flag on failure, but do not clear it on successful calls) +
    • +
    + +

+You must check the error flag after EVERY operation with ALGLIB objects and functions; +a short sketch is given after the list below. +In addition to calling computational ALGLIB functions, the following kinds of operations may result in an "exception": +

    + +
      +
• +calling the default constructor for an ALGLIB object +(simply instantiating an object may result in an "exception"). +Due to their large size, ALGLIB objects are allocated dynamically +(they look like value types, but internally everything is stored in heap memory). +Thus every constructor, even the default one, makes at least one malloc() call, which may fail. +
    • +
    • +calling copy/assignment constructors for ALGLIB objects (same reason - malloc) +
    • +
    • +resizing arrays with setlength()/setcontents() +and attaching to external memory with attach_to_ptr() +
    • +
    + +
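+A minimal sketch of this checking discipline, assuming AE_NO_EXCEPTIONS and AE_THREADING=AE_SERIAL_UNSAFE are defined globally (and, typically, exceptions disabled in the compiler as well, e.g. GCC's -fno-exceptions):

#include <stdio.h>
#include "ap.h"

int main()
{
    const char *msg = NULL;

    // even a default constructor may fail: it allocates heap memory internally
    alglib::real_2d_array a;
    if( alglib::get_error_flag(&msg) )
    {
        printf("constructor failed: %s\n", msg!=NULL ? msg : "(no message)");
        alglib::clear_error_flag();
        return 1;
    }

    // resizing may fail too, so the flag is checked again after the call
    a.setlength(100, 100);
    if( alglib::get_error_flag(&msg) )
    {
        printf("setlength failed: %s\n", msg!=NULL ? msg : "(no message)");
        alglib::clear_error_flag();
        return 1;
    }
    return 0;
}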

    7.2 Partial compilation

    + +

+Due to ALGLIB's modular structure it is possible to selectively enable/disable some of its subpackages along with their dependencies. +Deactivation of ALGLIB source code is performed at the preprocessor level - the compiler does not even see the disabled code. +Partial compilation can be used for two purposes: +

    + +
      +
    • +to reduce code size in cases when linker is unable to remove unused code +(happens with some compilers when ALGLIB is compiled as part of the shared library) +
    • +
    • +to reduce build time (disabled code is silently removed by preprocessor, +thus no time is spent in the compiler) +
    • +
    + +

+You can activate partial compilation by #defining the following symbols at the global level (a command-line sketch is given after the list): +

    + +
      +
    • +AE_PARTIAL_BUILD - to disable everything not enabled by default +
    • +
    • +AE_COMPILE_SUBPACKAGE - to selectively enable subpackage SUBPACKAGE, +with its name given in upper case (case sensitive). +You may combine several definitions like this in order to enable several subpackages. +Subpackage names can be found in the list of ALGLIB packages and subpackages. +
    • +
    + +
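+For example, a hypothetical command line that builds only the fft subpackage and its dependencies; AE_COMPILE_FFT is the assumed upper-case form of the fft subpackage name, following the rule described above:
+
+> g++ -I. -DAE_PARTIAL_BUILD -DAE_COMPILE_FFT *.cpp -lm -o myprog
+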

    7.3 Testing ALGLIB

    + +

+There are three test suites in ALGLIB: computational tests, interface tests and extended tests. +Computational tests are located in /tests/test_c.cpp. +They are focused on the numerical properties of algorithms, stress testing and "deep" tests (large automatically generated problems). +They require a significant amount of time to finish (tens of minutes). +

    + +

    +Interface tests are located in /tests/test_i.cpp. +These tests are focused on ability to correctly pass data between computational core and caller, ability to detect simple problems in inputs, +and on ability to at least compile ALGLIB with your compiler. +They are very fast (about a minute to finish including compilation time). +

    + +

    +Extended tests are located in /tests/test_x.cpp. +These tests are focused on testing some special properties +(say, testing that cloning object indeed results in 100% independent copy being created) +and performance of several chosen algorithms. +

    + +

    +Running test suite is easy - just +

    + +
      +
1. compile one of these files (test_c.cpp, test_i.cpp or test_x.cpp) along with the rest of the library
2. launch the executable you will get. It may take from several seconds (interface tests) to several minutes (computational tests) to get final results
    + +

+If you want to be sure that ALGLIB will work with some sophisticated optimization settings, set the corresponding flags at compile time. +If your compiler/system is not in the list of supported ones, we recommend running the test suites; if you are running out of time, run at least test_i.cpp. +
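+For example, a hypothetical GCC build of the interface tests (the location of the library sources relative to /tests depends on how you unpacked the distribution):
+
+> g++ -O2 -I. tests/test_i.cpp *.cpp -lm -o test_i
+> ./test_i
+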

    + + + + +

    8 ALGLIB packages and subpackages

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -1787,12 +2167,14 @@ - - + + + + - + @@ -1802,7 +2184,7 @@ - + @@ -1819,19 +2201,23 @@ + + + - + + @@ -1858,7 +2244,7 @@ - + @@ -1892,18 +2278,23 @@ rmatrixcopy
    rmatrixenforcesymmetricity
    rmatrixgemm
    +rmatrixgemv
    +rmatrixgencopy
    +rmatrixger
    rmatrixlefttrsm
    rmatrixmv
    rmatrixrank1
    rmatrixrighttrsm
    +rmatrixsymv
    rmatrixsyrk
    +rmatrixsyvmv
    rmatrixtranspose
    +rmatrixtrsv
    +rvectorcopy
    +

    8.1 AlglibMisc package

    +
    hqrnd High quality random numbers generator
    nearestneighbor Nearest neighbor search: approximate and exact
    xdebug Debug functions to test ALGLIB interface generator
     
    +

    8.2 DataAnalysis package

    +
    bdss Basic dataset functions
    clustering Clustering functions (hierarchical, k-means, k-means++)
    datacomp Backward compatibility functions
    dforest Decision forest classifier (regression model)
    filters Different filters used in data analysis
    knn K Nearest Neighbors classification/regression
    lda Linear discriminant analysis
    linreg Linear models
    logit Logit models
    mcpd Markov Chains for Population/proportional Data
    mlpbase Basic functions for neural networks
    mlpe Basic functions for neural ensemble models
    mlptrain Neural network training
    pca Principal component analysis
    ssa Singular Spectrum Analysis
     
    +

    8.3 DiffEquations package

    +
    odesolver Ordinary differential equation solver
     
    +

    8.4 FastTransforms package

    +
    conv Fast real/complex convolution
    corr Fast real/complex cross-correlation
    fft Real/complex FFT
    fht Real Fast Hartley Transform
     
    +

    8.5 Integration package

    autogk Adaptive 1-dimensional integration
    gkq Gauss-Kronrod quadrature generator

    8.6 Interpolation package

    idwint Inverse distance weighting: interpolation/fitting
    lsfit Linear and nonlinear least-squares solvers
    fitsphere Fitting circle/sphere to data (least squares, minimum circumscribed, maximum inscribed, minimum zone)
    idw Inverse distance weighting: interpolation/fitting with improved Shepard-like algorithm
    intcomp Backward compatibility functions
lsfit Fitting with least squares target function (linear and nonlinear least-squares)
    parametric Parametric curves
    polint Polynomial interpolation/fitting
    ratint Rational interpolation/fitting
    rbf Scattered 2/3-dimensional interpolation with RBF models
    rbf Scattered N-dimensional interpolation with RBF models
    spline1d 1D spline interpolation/fitting
    spline2d 2D spline interpolation
    spline3d 3D spline interpolation
    ablas Level 2 and Level 3 BLAS operations
    bdsvd Bidiagonal SVD
    evd Eigensolvers
    evd Direct and iterative eigensolvers
    inverseupdate Sherman-Morrison update of the inverse matrix
    matdet Determinant calculation
    matgen Random matrix generation

    8.8 Optimization package

    minbc Box constrained optimizer with fast activation of multiple constraints per step
    minbleic Bound constrained optimizer with additional linear equality/inequality constraints
    mincg Conjugate gradient optimizer
    mincomp Backward compatibility functions
    minlbfgs Limited memory BFGS optimizer
    minlm Improved Levenberg-Marquardt optimizer
    minlp Linear programming suite
    minnlc Nonlinearly constrained optimizer
    minns Nonsmooth constrained optimizer
    minqp Quadratic programming with bound and linear equality/inequality constraints
    optguardapi OptGuard integrity checking for nonlinear models
     

    8.9 Solvers package

    densesolver Dense linear system solver
    directdensesolvers Direct dense linear solvers
    directsparsesolvers Direct sparse linear solvers
    lincg Sparse linear CG solver
    linlsqr Sparse linear LSQR solver
    nleq Solvers for nonlinear equations
    jacobianelliptic Jacobian elliptic functions
    laguerre Laguerre polynomials
    legendre Legendre polynomials
    normaldistr Normal distribution
normaldistr Univariate and bivariate normal distribution PDF and CDF
    poissondistr Poisson distribution
    psif Psi function
    studenttdistr Student's t-distribution
    - -
    ablas_d_gemm Matrix multiplication (single-threaded)
    ablas_d_syrk Symmetric rank-K update (single-threaded)
    ablas_smp_gemm Matrix multiplication (multithreaded)
    ablas_smp_syrk Symmetric rank-K update (multithreaded)
    @@ -1928,7 +2319,8 @@
         ae_int_t ja,
         complex_2d_array& b,
         ae_int_t ib,
    -    ae_int_t jb);
    +    ae_int_t jb,
    +    const xparams _params = alglib::xdefault);
     
     
    @@ -1947,33 +2339,14 @@ * if Alpha=0, A is not used (not multiplied by zero - just not referenced) * if both Beta and Alpha are zero, C is filled by zeros. -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Because starting/stopping worker thread always - ! involves some overhead, parallelism starts to be profitable for N's - ! larger than 128. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -2010,7 +2383,7 @@ JC - submatrix offset -- ALGLIB routine -- - 16.12.2009 + 2009-2019 Bochkanov Sergey *************************************************************************/
    void alglib::cmatrixgemm( @@ -2029,27 +2402,11 @@ alglib::complex beta, complex_2d_array& c, ae_int_t ic, - ae_int_t jc); -void alglib::smp_cmatrixgemm( - ae_int_t m, - ae_int_t n, - ae_int_t k, - alglib::smp_complex alpha, - complex_2d_array a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - complex_2d_array b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - alglib::smp_complex beta, - complex_2d_array& c, - ae_int_t ic, - ae_int_t jc); + ae_int_t jc, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    +

    Examples:   [1]  

     
    /************************************************************************* @@ -2059,39 +2416,19 @@ * A is NxK matrix when A*A^H is calculated, KxN matrix otherwise Additional info: -* cache-oblivious algorithm is used. * multiplication result replaces C. If Beta=0, C elements are not used in calculations (not multiplied by zero - just not referenced) * if Alpha=0, A is not used (not multiplied by zero - just not referenced) * if both Beta and Alpha are zero, C is filled by zeros. -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Because starting/stopping worker thread always - ! involves some overhead, parallelism starts to be profitable for N's - ! larger than 128. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -2116,7 +2453,7 @@ other half unchanged (not referenced at all). -- ALGLIB routine -- - 16.12.2009 + 16.12.2009-22.01.2018 Bochkanov Sergey *************************************************************************/
    void alglib::cmatrixherk( @@ -2131,23 +2468,11 @@ complex_2d_array& c, ae_int_t ic, ae_int_t jc, - bool isupper); -void alglib::smp_cmatrixherk( - ae_int_t n, - ae_int_t k, - double alpha, - complex_2d_array a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - double beta, - complex_2d_array& c, - ae_int_t ic, - ae_int_t jc, - bool isupper); + bool isupper, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    +

    Examples:   [1]  

     
    /************************************************************************* @@ -2155,37 +2480,16 @@ * X is MxN general matrix * A is MxM upper/lower triangular/unitriangular matrix * "op" may be identity transformation, transposition, conjugate transposition - Multiplication result replaces X. -Cache-oblivious algorithm is used. - -COMMERCIAL EDITION OF ALGLIB: - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Because starting/stopping worker thread always - ! involves some overhead, parallelism starts to be profitable for N's - ! larger than 128. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -2208,7 +2512,7 @@ J2 - submatrix offset -- ALGLIB routine -- - 15.12.2009 + 15.12.2009-22.01.2018 Bochkanov Sergey *************************************************************************/
    void alglib::cmatrixlefttrsm( @@ -2222,19 +2526,8 @@ ae_int_t optype, complex_2d_array& x, ae_int_t i2, - ae_int_t j2); -void alglib::smp_cmatrixlefttrsm( - ae_int_t m, - ae_int_t n, - complex_2d_array a, - ae_int_t i1, - ae_int_t j1, - bool isupper, - bool isunit, - ae_int_t optype, - complex_2d_array& x, - ae_int_t i2, - ae_int_t j2); + ae_int_t j2, + const xparams _params = alglib::xdefault);
    @@ -2281,7 +2574,8 @@ complex_1d_array x, ae_int_t ix, complex_1d_array& y, - ae_int_t iy); + ae_int_t iy, + const xparams _params = alglib::xdefault); @@ -2309,7 +2603,8 @@ complex_1d_array& u, ae_int_t iu, complex_1d_array& v, - ae_int_t iv); + ae_int_t iv, + const xparams _params = alglib::xdefault); @@ -2319,37 +2614,16 @@ * X is MxN general matrix * A is NxN upper/lower triangular/unitriangular matrix * "op" may be identity transformation, transposition, conjugate transposition - Multiplication result replaces X. -Cache-oblivious algorithm is used. - -COMMERCIAL EDITION OF ALGLIB: - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Because starting/stopping worker thread always - ! involves some overhead, parallelism starts to be profitable for N's - ! larger than 128. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -2372,7 +2646,7 @@ J2 - submatrix offset -- ALGLIB routine -- - 15.12.2009 + 20.01.2018 Bochkanov Sergey *************************************************************************/
    void alglib::cmatrixrighttrsm( @@ -2386,19 +2660,8 @@ ae_int_t optype, complex_2d_array& x, ae_int_t i2, - ae_int_t j2); -void alglib::smp_cmatrixrighttrsm( - ae_int_t m, - ae_int_t n, - complex_2d_array a, - ae_int_t i1, - ae_int_t j1, - bool isupper, - bool isunit, - ae_int_t optype, - complex_2d_array& x, - ae_int_t i2, - ae_int_t j2); + ae_int_t j2, + const xparams _params = alglib::xdefault);
    @@ -2424,20 +2687,8 @@ complex_2d_array& c, ae_int_t ic, ae_int_t jc, - bool isupper); -void alglib::smp_cmatrixsyrk( - ae_int_t n, - ae_int_t k, - double alpha, - complex_2d_array a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - double beta, - complex_2d_array& c, - ae_int_t ic, - ae_int_t jc, - bool isupper); + bool isupper, + const xparams _params = alglib::xdefault); @@ -2463,7 +2714,8 @@ ae_int_t ja, complex_2d_array& b, ae_int_t ib, - ae_int_t jb); + ae_int_t jb, + const xparams _params = alglib::xdefault); @@ -2489,7 +2741,8 @@ ae_int_t ja, real_2d_array& b, ae_int_t ib, - ae_int_t jb); + ae_int_t jb, + const xparams _params = alglib::xdefault); @@ -2507,7 +2760,8 @@
    void alglib::rmatrixenforcesymmetricity( real_2d_array& a, ae_int_t n, - bool isupper); + bool isupper, + const xparams _params = alglib::xdefault);
    @@ -2526,33 +2780,14 @@ * if Alpha=0, A is not used (not multiplied by zero - just not referenced) * if both Beta and Alpha are zero, C is filled by zeros. -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Because starting/stopping worker thread always - ! involves some overhead, parallelism starts to be profitable for N's - ! larger than 128. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -2587,7 +2822,7 @@ JC - submatrix offset -- ALGLIB routine -- - 2009-2013 + 2009-2019 Bochkanov Sergey *************************************************************************/
    void alglib::rmatrixgemm( @@ -2606,27 +2841,108 @@ double beta, real_2d_array& c, ae_int_t ic, - ae_int_t jc); -void alglib::smp_rmatrixgemm( + ae_int_t jc, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* + +*************************************************************************/ +
    void alglib::rmatrixgemv( ae_int_t m, ae_int_t n, - ae_int_t k, double alpha, real_2d_array a, ae_int_t ia, ae_int_t ja, - ae_int_t optypea, - real_2d_array b, + ae_int_t opa, + real_1d_array x, + ae_int_t ix, + double beta, + real_1d_array& y, + ae_int_t iy, + const xparams _params = alglib::xdefault); + +
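+
+The upstream comment block for rmatrixgemv() is empty in this version of the manual. The sketch below therefore assumes the conventional BLAS GEMV semantics, y := alpha*op(A)*x + beta*y, suggested by the parameter names and by the deprecation notes for rmatrixmv() further down; treat it as an illustration of the calling convention rather than authoritative documentation.

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    real_2d_array a = "[[1,2],[3,4]]";
    real_1d_array x = "[1,1]";
    real_1d_array y = "[0,0]";

    // assumed semantics: y := 1.0*A*x + 0.0*y (opa=0 means no transposition)
    rmatrixgemv(2, 2, 1.0, a, 0, 0, 0, x, 0, 0.0, y, 0);
    printf("%s\n", y.tostring(3).c_str()); // under that assumption: [3,7]
    return 0;
}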
    + +
    +
    /************************************************************************* +Performs generalized copy: B := Beta*B + Alpha*A. + +If Beta=0, then previous contents of B is simply ignored. If Alpha=0, then +A is ignored and not referenced. If both Alpha and Beta are zero, B is +filled by zeros. + +Input parameters: + M - number of rows + N - number of columns + Alpha- coefficient + A - source matrix, MxN submatrix is copied and transposed + IA - submatrix offset (row index) + JA - submatrix offset (column index) + Beta- coefficient + B - destination matrix, must be large enough to store result + IB - submatrix offset (row index) + JB - submatrix offset (column index) +*************************************************************************/ +
    void alglib::rmatrixgencopy( + ae_int_t m, + ae_int_t n, + double alpha, + real_2d_array a, + ae_int_t ia, + ae_int_t ja, + double beta, + real_2d_array& b, ae_int_t ib, ae_int_t jb, - ae_int_t optypeb, - double beta, - real_2d_array& c, - ae_int_t ic, - ae_int_t jc); + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Rank-1 correction: A := A + alpha*u*v' + +NOTE: this function expects A to be large enough to store result. No + automatic preallocation happens for smaller arrays. No integrity + checks are performed for sizes of A, u, v. + +INPUT PARAMETERS: + M - number of rows + N - number of columns + A - target matrix, MxN submatrix is updated + IA - submatrix offset (row index) + JA - submatrix offset (column index) + Alpha- coefficient + U - vector #1 + IU - subvector offset + V - vector #2 + IV - subvector offset + + + -- ALGLIB routine -- + + 16.10.2017 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::rmatrixger( + ae_int_t m, + ae_int_t n, + real_2d_array& a, + ae_int_t ia, + ae_int_t ja, + double alpha, + real_1d_array u, + ae_int_t iu, + real_1d_array v, + ae_int_t iv, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

     
    /************************************************************************* @@ -2634,37 +2950,16 @@ * X is MxN general matrix * A is MxM upper/lower triangular/unitriangular matrix * "op" may be identity transformation, transposition - Multiplication result replaces X. -Cache-oblivious algorithm is used. - -COMMERCIAL EDITION OF ALGLIB: - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Because starting/stopping worker thread always - ! involves some overhead, parallelism starts to be profitable for N's - ! larger than 128. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -2686,7 +2981,7 @@ J2 - submatrix offset -- ALGLIB routine -- - 15.12.2009 + 15.12.2009-22.01.2018 Bochkanov Sergey *************************************************************************/
    void alglib::rmatrixlefttrsm( @@ -2700,24 +2995,16 @@ ae_int_t optype, real_2d_array& x, ae_int_t i2, - ae_int_t j2); -void alglib::smp_rmatrixlefttrsm( - ae_int_t m, - ae_int_t n, - real_2d_array a, - ae_int_t i1, - ae_int_t j1, - bool isupper, - bool isunit, - ae_int_t optype, - real_2d_array& x, - ae_int_t i2, - ae_int_t j2); + ae_int_t j2, + const xparams _params = alglib::xdefault);
     
    /************************************************************************* +IMPORTANT: this function is deprecated since ALGLIB 3.13. Use RMatrixGEMV() + which is more generic version of this function. + Matrix-vector product: y := op(A)*x INPUT PARAMETERS: @@ -2756,12 +3043,16 @@ real_1d_array x, ae_int_t ix, real_1d_array& y, - ae_int_t iy); + ae_int_t iy, + const xparams _params = alglib::xdefault);
     
    /************************************************************************* +IMPORTANT: this function is deprecated since ALGLIB 3.13. Use RMatrixGER() + which is more generic version of this function. + Rank-1 correction: A := A + u*v' INPUT PARAMETERS: @@ -2784,7 +3075,8 @@ real_1d_array& u, ae_int_t iu, real_1d_array& v, - ae_int_t iv); + ae_int_t iv, + const xparams _params = alglib::xdefault);
    @@ -2794,37 +3086,16 @@ * X is MxN general matrix * A is NxN upper/lower triangular/unitriangular matrix * "op" may be identity transformation, transposition - Multiplication result replaces X. -Cache-oblivious algorithm is used. -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Because starting/stopping worker thread always - ! involves some overhead, parallelism starts to be profitable for N's - ! larger than 128. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -2846,7 +3117,7 @@ J2 - submatrix offset -- ALGLIB routine -- - 15.12.2009 + 15.12.2009-22.01.2018 Bochkanov Sergey *************************************************************************/
    void alglib::rmatrixrighttrsm( @@ -2860,19 +3131,28 @@ ae_int_t optype, real_2d_array& x, ae_int_t i2, - ae_int_t j2); -void alglib::smp_rmatrixrighttrsm( - ae_int_t m, - ae_int_t n, - real_2d_array a, - ae_int_t i1, - ae_int_t j1, - bool isupper, - bool isunit, - ae_int_t optype, - real_2d_array& x, - ae_int_t i2, - ae_int_t j2); + ae_int_t j2, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* + +*************************************************************************/ +
    void alglib::rmatrixsymv( + ae_int_t n, + double alpha, + real_2d_array a, + ae_int_t ia, + ae_int_t ja, + bool isupper, + real_1d_array x, + ae_int_t ix, + double beta, + real_1d_array& y, + ae_int_t iy, + const xparams _params = alglib::xdefault);
    @@ -2884,39 +3164,19 @@ * A is NxK matrix when A*A^T is calculated, KxN matrix otherwise Additional info: -* cache-oblivious algorithm is used. * multiplication result replaces C. If Beta=0, C elements are not used in calculations (not multiplied by zero - just not referenced) * if Alpha=0, A is not used (not multiplied by zero - just not referenced) * if both Beta and Alpha are zero, C is filled by zeros. -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Because starting/stopping worker thread always - ! involves some overhead, parallelism starts to be profitable for N's - ! larger than 128. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -2939,7 +3199,7 @@ IsUpper - whether C is upper triangular or lower triangular -- ALGLIB routine -- - 16.12.2009 + 16.12.2009-22.01.2018 Bochkanov Sergey *************************************************************************/
    void alglib::rmatrixsyrk( @@ -2954,23 +3214,28 @@ real_2d_array& c, ae_int_t ic, ae_int_t jc, - bool isupper); -void alglib::smp_rmatrixsyrk( + bool isupper, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* + +*************************************************************************/ +
    double alglib::rmatrixsyvmv( ae_int_t n, - ae_int_t k, - double alpha, real_2d_array a, ae_int_t ia, ae_int_t ja, - ae_int_t optypea, - double beta, - real_2d_array& c, - ae_int_t ic, - ae_int_t jc, - bool isupper); + bool isupper, + real_1d_array x, + ae_int_t ix, + real_1d_array& tmp, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

     
    /************************************************************************* @@ -2994,7 +3259,78 @@ ae_int_t ja, real_2d_array& b, ae_int_t ib, - ae_int_t jb); + ae_int_t jb, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +This subroutine solves linear system op(A)*x=b where: +* A is NxN upper/lower triangular/unitriangular matrix +* X and B are Nx1 vectors +* "op" may be identity transformation, transposition, conjugate transposition + +Solution replaces X. + +IMPORTANT: * no overflow/underflow/degeneracy tests are performed. + * no integrity checks for operand sizes, out-of-bounds accesses + and so on are performed + +INPUT PARAMETERS + N - matrix size, N>=0 + A - matrix, actual matrix is stored in A[IA:IA+N-1,JA:JA+N-1] + IA - submatrix offset + JA - submatrix offset + IsUpper - whether matrix is upper triangular + IsUnit - whether matrix is unitriangular + OpType - transformation type: + * 0 - no transformation + * 1 - transposition + X - right part, actual vector is stored in X[IX:IX+N-1] + IX - offset + +OUTPUT PARAMETERS + X - solution replaces elements X[IX:IX+N-1] + + -- ALGLIB routine / remastering of LAPACK's DTRSV -- + (c) 2017 Bochkanov Sergey - converted to ALGLIB + (c) 2016 Reference BLAS level1 routine (LAPACK version 3.7.0) + Reference BLAS is a software package provided by Univ. of Tennessee, + Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd. +*************************************************************************/ +
    void alglib::rmatrixtrsv( + ae_int_t n, + real_2d_array a, + ae_int_t ia, + ae_int_t ja, + bool isupper, + bool isunit, + ae_int_t optype, + real_1d_array& x, + ae_int_t ix, + const xparams _params = alglib::xdefault); + +
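+
+A minimal usage sketch for rmatrixtrsv(), based on the comment above (it solves op(A)*x=b; the right-hand side is stored in X and is overwritten by the solution):

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    // upper triangular system A*x = b with A = [[1,2],[0,3]] and b = [5,6]
    real_2d_array a = "[[1,2],[0,3]]";
    real_1d_array x = "[5,6]";      // right-hand side; solution replaces it

    // n=2, offsets 0, isupper=true, isunit=false, optype=0 (no transposition)
    rmatrixtrsv(2, a, 0, 0, true, false, 0, x, 0);
    printf("%s\n", x.tostring(3).c_str()); // EXPECTED: [1,2]
    return 0;
}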
    + +
    +
    /************************************************************************* +Copy + +Input parameters: + N - subvector size + A - source vector, N elements are copied + IA - source offset (first element index) + B - destination vector, must be large enough to store result + IB - destination offset (first element index) +*************************************************************************/ +
    void alglib::rvectorcopy( + ae_int_t n, + real_1d_array a, + ae_int_t ia, + real_1d_array& b, + ae_int_t ib, + const xparams _params = alglib::xdefault);
    @@ -3134,230 +3470,6 @@ } - -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // In this example we assume that you already know how to work with
    -    // rmatrixgemm() function. Below we concentrate on its multithreading
    -    // capabilities.
    -    //
    -    // SMP edition of ALGLIB includes smp_rmatrixgemm() - multithreaded
    -    // version of rmatrixgemm() function. In the basic edition of ALGLIB
    -    // (GPL edition or commercial version without SMP support) this function
    -    // just calls single-threaded stub. So, you may call this function from
    -    // ANY edition of ALGLIB, but only in SMP edition it will work in really
    -    // multithreaded mode.
    -    //
    -    // In order to use multithreading, you have to:
    -    // 1) Install SMP edition of ALGLIB.
    -    // 2) This step is specific for C++ users: you should activate OS-specific
    -    //    capabilities of ALGLIB by defining AE_OS=AE_POSIX (for *nix systems)
    -    //    or AE_OS=AE_WINDOWS (for Windows systems).
    -    //    C# users do not have to perform this step because C# programs are
    -    //    portable across different systems without OS-specific tuning.
    -    // 3) Allow ALGLIB to know about number of worker threads to use:
    -    //    a) autodetection (C++, C#):
    -    //          ALGLIB will automatically determine number of CPU cores and
    -    //          (by default) will use all cores except for one. Say, on 4-core
    -    //          system it will use three cores - unless you manually told it
    -    //          to use more or less. It will keep your system responsive during
    -    //          lengthy computations.
    -    //          Such behavior may be changed with setnworkers() call:
    -    //          * alglib::setnworkers(0)  = use all cores
    -    //          * alglib::setnworkers(-1) = leave one core unused
    -    //          * alglib::setnworkers(-2) = leave two cores unused
    -    //          * alglib::setnworkers(+2) = use 2 cores (even if you have more)
    -    //    b) manual specification (C++, C#):
    -    //          You may want to specify maximum number of worker threads during
    -    //          compile time by means of preprocessor definition AE_NWORKERS.
    -    //          For C++ it will be "AE_NWORKERS=X" where X can be any positive number.
    -    //          For C# it is "AE_NWORKERSX", where X should be replaced by number of
    -    //          workers (AE_NWORKERS2, AE_NWORKERS3, AE_NWORKERS4, ...).
    -    //          You can add this definition to compiler command line or change
    -    //          corresponding project settings in your IDE.
    -    //
    -    // After you installed and configured SMP edition of ALGLIB, you may choose
    -    // between serial and multithreaded versions of SMP-capable functions:
    -    // * serial version works as usual, in the context of the calling thread
    -    // * multithreaded version (with "smp_" prefix) creates (or wakes up) worker
    -    //   threads, inserts task in the worker queue, and waits for completion of
    -    //   the task. All processing is done in context of worker thread(s).
    -    //
    -    // NOTE: because starting/stopping worker threads costs thousands of CPU cycles,
    -    //       you should not use multithreading for lightweight computational problems.
    -    //
    -    // NOTE: some old POSIX-compatible operating systems do not support
    -    //       sysconf(_SC_NPROCESSORS_ONLN) system call which is required in order
    -    //       to automatically determine number of active cores. On these systems
    -    //       you should specify number of cores manually at compile time.
    -    //       Without it ALGLIB will run in single-threaded mode.
    -    //
    -    // Now, back to our example. In this example we will show you:
    -    // * how to call SMP version of rmatrixgemm(). Because we work with tiny 2x2
    -    //   matrices, we won't expect to see ANY speedup from using multithreading.
    -    //   The only purpose of this demo is to show how to call SMP functions.
    -    // * how to modify number of worker threads used by ALGLIB
    -    //
    -    real_2d_array a = "[[2,1],[1,3]]";
    -    real_2d_array b = "[[2,1],[0,1]]";
    -    real_2d_array c = "[[0,0],[0,0]]";
    -    ae_int_t m = 2;
    -    ae_int_t n = 2;
    -    ae_int_t k = 2;
    -    double alpha = 1.0;
    -    ae_int_t ia = 0;
    -    ae_int_t ja = 0;
    -    ae_int_t optypea = 0;
    -    ae_int_t ib = 0;
    -    ae_int_t jb = 0;
    -    ae_int_t optypeb = 0;
    -    double beta = 0.0;
    -    ae_int_t ic = 0;
    -    ae_int_t jc = 0;
    -
    -    // serial code
    -    c = "[[0,0],[0,0]]";
    -    rmatrixgemm(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc);
    -
    -    // SMP code with default number of worker threads
    -    c = "[[0,0],[0,0]]";
    -    smp_rmatrixgemm(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc);
    -    printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [[4,3],[2,4]]
    -
    -    // override number of worker threads - use two cores
    -    alglib::setnworkers(+2);
    -    c = "[[0,0],[0,0]]";
    -    smp_rmatrixgemm(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc);
    -    printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [[4,3],[2,4]]
    -    return 0;
    -}
    -
    -
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // In this example we assume that you already know how to work with
    -    // rmatrixsyrk() function. Below we concentrate on its multithreading
    -    // capabilities.
    -    //
    -    // SMP edition of ALGLIB includes smp_rmatrixsyrk() - multithreaded
    -    // version of rmatrixsyrk() function. In the basic edition of ALGLIB
    -    // (GPL edition or commercial version without SMP support) this function
    -    // just calls single-threaded stub. So, you may call this function from
    -    // ANY edition of ALGLIB, but only in SMP edition it will work in really
    -    // multithreaded mode.
    -    //
    -    // In order to use multithreading, you have to:
    -    // 1) Install SMP edition of ALGLIB.
    -    // 2) This step is specific for C++ users: you should activate OS-specific
    -    //    capabilities of ALGLIB by defining AE_OS=AE_POSIX (for *nix systems)
    -    //    or AE_OS=AE_WINDOWS (for Windows systems).
    -    //    C# users do not have to perform this step because C# programs are
    -    //    portable across different systems without OS-specific tuning.
    -    // 3) Allow ALGLIB to know about number of worker threads to use:
    -    //    a) autodetection (C++, C#):
    -    //          ALGLIB will automatically determine number of CPU cores and
    -    //          (by default) will use all cores except for one. Say, on 4-core
    -    //          system it will use three cores - unless you manually told it
    -    //          to use more or less. It will keep your system responsive during
    -    //          lengthy computations.
    -    //          Such behavior may be changed with setnworkers() call:
    -    //          * alglib::setnworkers(0)  = use all cores
    -    //          * alglib::setnworkers(-1) = leave one core unused
    -    //          * alglib::setnworkers(-2) = leave two cores unused
    -    //          * alglib::setnworkers(+2) = use 2 cores (even if you have more)
    -    //    b) manual specification (C++, C#):
    -    //          You may want to specify maximum number of worker threads during
    -    //          compile time by means of preprocessor definition AE_NWORKERS.
    -    //          For C++ it will be "AE_NWORKERS=X" where X can be any positive number.
    -    //          For C# it is "AE_NWORKERSX", where X should be replaced by number of
    -    //          workers (AE_NWORKERS2, AE_NWORKERS3, AE_NWORKERS4, ...).
    -    //          You can add this definition to compiler command line or change
    -    //          corresponding project settings in your IDE.
    -    //
    -    // After you installed and configured SMP edition of ALGLIB, you may choose
    -    // between serial and multithreaded versions of SMP-capable functions:
    -    // * serial version works as usual, in the context of the calling thread
    -    // * multithreaded version (with "smp_" prefix) creates (or wakes up) worker
    -    //   threads, inserts task in the worker queue, and waits for completion of
    -    //   the task. All processing is done in context of worker thread(s).
    -    //
    -    // NOTE: because starting/stopping worker threads costs thousands of CPU cycles,
    -    //       you should not use multithreading for lightweight computational problems.
    -    //
    -    // NOTE: some old POSIX-compatible operating systems do not support
    -    //       sysconf(_SC_NPROCESSORS_ONLN) system call which is required in order
    -    //       to automatically determine number of active cores. On these systems
    -    //       you should specify number of cores manually at compile time.
    -    //       Without it ALGLIB will run in single-threaded mode.
    -    //
    -    // Now, back to our example. In this example we will show you:
    -    // * how to call SMP version of rmatrixsyrk(). Because we work with tiny 2x2
    -    //   matrices, we won't expect to see ANY speedup from using multithreading.
    -    //   The only purpose of this demo is to show how to call SMP functions.
    -    // * how to modify number of worker threads used by ALGLIB
    -    //
    -    ae_int_t n = 2;
    -    ae_int_t k = 1;
    -    double alpha = 1.0;
    -    ae_int_t ia = 0;
    -    ae_int_t ja = 0;
    -    ae_int_t optypea = 2;
    -    double beta = 0.0;
    -    ae_int_t ic = 0;
    -    ae_int_t jc = 0;
    -    bool isupper = true;
    -    real_2d_array a = "[[1,2]]";
    -    real_2d_array c = "[[]]";
    -
    -    //
    -    // Default number of worker threads.
    -    // Preallocate space to store result, call multithreaded version, test.
    -    //
    -    // NOTE: this function updates only one triangular part of C. In our
    -    //       example we choose to update upper triangle.
    -    //
    -    c = "[[0,0],[0,0]]";
    -    smp_rmatrixsyrk(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper);
    -    printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [[1,2],[0,4]]
    -
    -    //
    -    // Override default number of worker threads (set to 2).
    -    // Preallocate space to store result, call multithreaded version, test.
    -    //
    -    // NOTE: this function updates only one triangular part of C. In our
    -    //       example we choose to update upper triangle.
    -    //
    -    alglib::setnworkers(+2);
    -    c = "[[0,0],[0,0]]";
    -    smp_rmatrixsyrk(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper);
    -    printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [[1,2],[0,4]]
    -    return 0;
    -}
    -
    -
     
    @@ -3404,7 +3516,8 @@ double& ai, double& aip, double& bi, - double& bip); + double& bip, + const xparams _params = alglib::xdefault);
    @@ -3474,7 +3587,7 @@ *************************************************************************/
    void autogkintegrate(autogkstate &state, void (*func)(double x, double xminusa, double bminusx, double &y, void *ptr), - void *ptr = NULL); + void *ptr = NULL, const xparams _xparams = alglib::xdefault);

    Examples:   [1]  

    @@ -3497,7 +3610,8 @@
    void alglib::autogkresults( autogkstate state, double& v, - autogkreport& rep); + autogkreport& rep, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -3540,7 +3654,8 @@ double b, double alpha, double beta, - autogkstate& state); + autogkstate& state, + const xparams _params = alglib::xdefault); @@ -3571,7 +3686,11 @@ -- ALGLIB -- Copyright 06.05.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::autogksmooth(double a, double b, autogkstate& state); +
    void alglib::autogksmooth( + double a, + double b, + autogkstate& state, + const xparams _params = alglib::xdefault);

    Examples:   [1]  
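Tying together the autogkintegrate(), autogkresults() and autogksmooth() entries above, here is a minimal sketch that integrates f(x)=x^2 over [0,1]. The integration.h header and the stdafx.h include follow the layout of the packaged examples and are assumptions about your setup; the callback signature is the one shown for autogkintegrate().

#include "stdafx.h"
#include <stdio.h>
#include "integration.h"

using namespace alglib;

// Integrand callback: y = f(x) = x^2. The xminusa/bminusx arguments help with
// endpoint singularities and are unused for this smooth integrand.
void int_function_1_func(double x, double xminusa, double bminusx, double &y, void *ptr)
{
    y = x*x;
}

int main(int argc, char **argv)
{
    autogkstate s;
    autogkreport rep;
    double v;

    autogksmooth(0, 1, s);                    // integrate a smooth function over [0,1]
    autogkintegrate(s, int_function_1_func);  // adaptive Gauss-Kronrod quadrature
    autogkresults(s, v, rep);
    printf("%.3f\n", v); // EXPECTED: 0.333
    return 0;
}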

    @@ -3604,7 +3723,8 @@ double a, double b, double xwidth, - autogkstate& state); + autogkstate& state, + const xparams _params = alglib::xdefault); @@ -3694,8 +3814,15 @@ -- ALGLIB -- Copyright 28.10.2010 by Bochkanov Sergey *************************************************************************/ -
    double alglib::cov2(real_1d_array x, real_1d_array y); -double alglib::cov2(real_1d_array x, real_1d_array y, ae_int_t n); +
    double alglib::cov2( + real_1d_array x, + real_1d_array y, + const xparams _params = alglib::xdefault); +double alglib::cov2( + real_1d_array x, + real_1d_array y, + ae_int_t n, + const xparams _params = alglib::xdefault);

    Examples:   [1]  
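A one-line usage sketch for the two-sample covariance above. It assumes statistics.h is the header that declares the basic statistics routines, as in the packaged sources.

#include "stdafx.h"
#include <stdio.h>
#include "statistics.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // y = 2*x, so the covariance of (x,y) is twice the variance of x.
    real_1d_array x = "[0,1,2,3]";
    real_1d_array y = "[0,2,4,6]";
    printf("%.4f\n", cov2(x, y)); // roughly 3.33 with the usual 1/(N-1) normalization
    return 0;
}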

    @@ -3704,24 +3831,18 @@
    /************************************************************************* Covariance matrix -SMP EDITION OF ALGLIB: - - ! This function can utilize multicore capabilities of your system. In - ! order to do this you have to call version with "smp_" prefix, which - ! indicates that multicore code will be used. - ! - ! This note is given for users of SMP edition; if you use GPL edition, - ! or commercial edition of ALGLIB without SMP support, you still will - ! be able to call smp-version of this function, but all computations - ! will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - ! - ! You should remember that starting/stopping worker thread always have - ! non-zero cost. Although multicore version is pretty efficient on - ! large problems, we do not recommend you to use it on small problems - - ! with covariance matrices smaller than 128*128. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - array[N,M], sample matrix: @@ -3740,18 +3861,16 @@ -- ALGLIB -- Copyright 28.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::covm(real_2d_array x, real_2d_array& c); -void alglib::covm( +
    void alglib::covm( real_2d_array x, - ae_int_t n, - ae_int_t m, - real_2d_array& c); -void alglib::smp_covm(real_2d_array x, real_2d_array& c); -void alglib::smp_covm( + real_2d_array& c, + const xparams _params = alglib::xdefault); +void alglib::covm( real_2d_array x, ae_int_t n, ae_int_t m, - real_2d_array& c); + real_2d_array& c, + const xparams _params = alglib::xdefault);

    Examples:   [1]  
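A matching sketch for the matrix form: covm() takes an array[N,M] sample matrix (rows are observations) and writes the MxM covariance matrix of the columns into C. The header choice is the same assumption as in the cov2() sketch.

#include "stdafx.h"
#include <stdio.h>
#include "statistics.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Three observations of two variables; the second column is twice the first.
    real_2d_array x = "[[1,2],[2,4],[3,6]]";
    real_2d_array c;
    covm(x, c);
    printf("%s\n", c.tostring(2).c_str()); // 2x2 covariance matrix of the columns
    return 0;
}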

    @@ -3760,24 +3879,18 @@
    /************************************************************************* Cross-covariance matrix -SMP EDITION OF ALGLIB: - - ! This function can utilize multicore capabilities of your system. In - ! order to do this you have to call version with "smp_" prefix, which - ! indicates that multicore code will be used. - ! - ! This note is given for users of SMP edition; if you use GPL edition, - ! or commercial edition of ALGLIB without SMP support, you still will - ! be able to call smp-version of this function, but all computations - ! will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - ! - ! You should remember that starting/stopping worker thread always have - ! non-zero cost. Although multicore version is pretty efficient on - ! large problems, we do not recommend you to use it on small problems - - ! with covariance matrices smaller than 128*128. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - array[N,M1], sample matrix: @@ -3802,22 +3915,19 @@ -- ALGLIB -- Copyright 28.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::covm2(real_2d_array x, real_2d_array y, real_2d_array& c); -void alglib::covm2( +
    void alglib::covm2( real_2d_array x, real_2d_array y, - ae_int_t n, - ae_int_t m1, - ae_int_t m2, - real_2d_array& c); -void alglib::smp_covm2(real_2d_array x, real_2d_array y, real_2d_array& c); -void alglib::smp_covm2( + real_2d_array& c, + const xparams _params = alglib::xdefault); +void alglib::covm2( real_2d_array x, real_2d_array y, ae_int_t n, ae_int_t m1, ae_int_t m2, - real_2d_array& c); + real_2d_array& c, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -3840,8 +3950,15 @@ -- ALGLIB -- Copyright 28.10.2010 by Bochkanov Sergey *************************************************************************/ -
    double alglib::pearsoncorr2(real_1d_array x, real_1d_array y); -double alglib::pearsoncorr2(real_1d_array x, real_1d_array y, ae_int_t n); +
    double alglib::pearsoncorr2( + real_1d_array x, + real_1d_array y, + const xparams _params = alglib::xdefault); +double alglib::pearsoncorr2( + real_1d_array x, + real_1d_array y, + ae_int_t n, + const xparams _params = alglib::xdefault);

    Examples:   [1]  
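For completeness, a tiny sketch of the two-sample Pearson coefficient: for perfectly linearly related samples it is exactly 1. Same header assumption as in the covariance sketches.

#include "stdafx.h"
#include <stdio.h>
#include "statistics.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_1d_array x = "[1,2,3,4]";
    real_1d_array y = "[10,20,30,40]";     // y is a positive linear function of x
    printf("%.3f\n", pearsoncorr2(x, y));  // EXPECTED: 1.000
    return 0;
}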

    @@ -3856,7 +3973,8 @@
    double alglib::pearsoncorrelation( real_1d_array x, real_1d_array y, - ae_int_t n); + ae_int_t n, + const xparams _params = alglib::xdefault);
    @@ -3864,24 +3982,18 @@
    /************************************************************************* Pearson product-moment correlation matrix -SMP EDITION OF ALGLIB: - - ! This function can utilize multicore capabilities of your system. In - ! order to do this you have to call version with "smp_" prefix, which - ! indicates that multicore code will be used. - ! - ! This note is given for users of SMP edition; if you use GPL edition, - ! or commercial edition of ALGLIB without SMP support, you still will - ! be able to call smp-version of this function, but all computations - ! will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - ! - ! You should remember that starting/stopping worker thread always have - ! non-zero cost. Although multicore version is pretty efficient on - ! large problems, we do not recommend you to use it on small problems - - ! with correlation matrices smaller than 128*128. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - array[N,M], sample matrix: @@ -3900,18 +4012,16 @@ -- ALGLIB -- Copyright 28.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pearsoncorrm(real_2d_array x, real_2d_array& c); -void alglib::pearsoncorrm( +
    void alglib::pearsoncorrm( real_2d_array x, - ae_int_t n, - ae_int_t m, - real_2d_array& c); -void alglib::smp_pearsoncorrm(real_2d_array x, real_2d_array& c); -void alglib::smp_pearsoncorrm( + real_2d_array& c, + const xparams _params = alglib::xdefault); +void alglib::pearsoncorrm( real_2d_array x, ae_int_t n, ae_int_t m, - real_2d_array& c); + real_2d_array& c, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -3920,24 +4030,18 @@
    /************************************************************************* Pearson product-moment cross-correlation matrix -SMP EDITION OF ALGLIB: - - ! This function can utilize multicore capabilities of your system. In - ! order to do this you have to call version with "smp_" prefix, which - ! indicates that multicore code will be used. - ! - ! This note is given for users of SMP edition; if you use GPL edition, - ! or commercial edition of ALGLIB without SMP support, you still will - ! be able to call smp-version of this function, but all computations - ! will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - ! - ! You should remember that starting/stopping worker thread always have - ! non-zero cost. Although multicore version is pretty efficient on - ! large problems, we do not recommend you to use it on small problems - - ! with correlation matrices smaller than 128*128. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - array[N,M1], sample matrix: @@ -3965,25 +4069,16 @@
    void alglib::pearsoncorrm2( real_2d_array x, real_2d_array y, - real_2d_array& c); + real_2d_array& c, + const xparams _params = alglib::xdefault); void alglib::pearsoncorrm2( real_2d_array x, real_2d_array y, ae_int_t n, ae_int_t m1, ae_int_t m2, - real_2d_array& c); -void alglib::smp_pearsoncorrm2( - real_2d_array x, - real_2d_array y, - real_2d_array& c); -void alglib::smp_pearsoncorrm2( - real_2d_array x, - real_2d_array y, - ae_int_t n, - ae_int_t m1, - ae_int_t m2, - real_2d_array& c); + real_2d_array& c, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -3997,24 +4092,16 @@ * ranking starts from 0, ends at NFeatures-1 * sum of within-row values is equal to (NFeatures-1)*NFeatures/2 -SMP EDITION OF ALGLIB: - - ! This function can utilize multicore capabilities of your system. In - ! order to do this you have to call version with "smp_" prefix, which - ! indicates that multicore code will be used. - ! - ! This note is given for users of SMP edition; if you use GPL edition, - ! or commercial edition of ALGLIB without SMP support, you still will - ! be able to call smp-version of this function, but all computations - ! will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - ! - ! You should remember that starting/stopping worker thread always have - ! non-zero cost. Although multicore version is pretty efficient on - ! large problems, we do not recommend you to use it on small problems - - ! ones where expected operations count is less than 100.000 + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: XY - array[NPoints,NFeatures], dataset @@ -4028,16 +4115,14 @@ -- ALGLIB -- Copyright 18.04.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rankdata(real_2d_array& xy); -void alglib::rankdata( +
    void alglib::rankdata( real_2d_array& xy, - ae_int_t npoints, - ae_int_t nfeatures); -void alglib::smp_rankdata(real_2d_array& xy); -void alglib::smp_rankdata( + const xparams _params = alglib::xdefault); +void alglib::rankdata( real_2d_array& xy, ae_int_t npoints, - ae_int_t nfeatures); + ae_int_t nfeatures, + const xparams _params = alglib::xdefault);
    @@ -4052,24 +4137,16 @@ * centering is performed by subtracting mean from each row, i.e it changes mean value, but does NOT change higher moments -SMP EDITION OF ALGLIB: - - ! This function can utilize multicore capabilities of your system. In - ! order to do this you have to call version with "smp_" prefix, which - ! indicates that multicore code will be used. - ! - ! This note is given for users of SMP edition; if you use GPL edition, - ! or commercial edition of ALGLIB without SMP support, you still will - ! be able to call smp-version of this function, but all computations - ! will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - ! - ! You should remember that starting/stopping worker thread always have - ! non-zero cost. Although multicore version is pretty efficient on - ! large problems, we do not recommend you to use it on small problems - - ! ones where expected operations count is less than 100.000 + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: XY - array[NPoints,NFeatures], dataset @@ -4083,16 +4160,14 @@ -- ALGLIB -- Copyright 18.04.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rankdatacentered(real_2d_array& xy); -void alglib::rankdatacentered( +
    void alglib::rankdatacentered( real_2d_array& xy, - ae_int_t npoints, - ae_int_t nfeatures); -void alglib::smp_rankdatacentered(real_2d_array& xy); -void alglib::smp_rankdatacentered( + const xparams _params = alglib::xdefault); +void alglib::rankdatacentered( real_2d_array& xy, ae_int_t npoints, - ae_int_t nfeatures); + ae_int_t nfeatures, + const xparams _params = alglib::xdefault);
    @@ -4112,8 +4187,15 @@ -- ALGLIB -- Copyright 06.09.2006 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sampleadev(real_1d_array x, double& adev); -void alglib::sampleadev(real_1d_array x, ae_int_t n, double& adev); +
    void alglib::sampleadev( + real_1d_array x, + double& adev, + const xparams _params = alglib::xdefault); +void alglib::sampleadev( + real_1d_array x, + ae_int_t n, + double& adev, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -4137,8 +4219,13 @@ -- ALGLIB -- Copyright 06.09.2006 by Bochkanov Sergey *************************************************************************/ -
    double alglib::samplekurtosis(real_1d_array x); -double alglib::samplekurtosis(real_1d_array x, ae_int_t n); +
    double alglib::samplekurtosis( + real_1d_array x, + const xparams _params = alglib::xdefault); +double alglib::samplekurtosis( + real_1d_array x, + ae_int_t n, + const xparams _params = alglib::xdefault);
    @@ -4161,8 +4248,13 @@ -- ALGLIB -- Copyright 06.09.2006 by Bochkanov Sergey *************************************************************************/ -
    double alglib::samplemean(real_1d_array x); -double alglib::samplemean(real_1d_array x, ae_int_t n); +
    double alglib::samplemean( + real_1d_array x, + const xparams _params = alglib::xdefault); +double alglib::samplemean( + real_1d_array x, + ae_int_t n, + const xparams _params = alglib::xdefault);
    @@ -4182,8 +4274,15 @@ -- ALGLIB -- Copyright 06.09.2006 by Bochkanov Sergey *************************************************************************/ -
    void alglib::samplemedian(real_1d_array x, double& median); -void alglib::samplemedian(real_1d_array x, ae_int_t n, double& median); +
    void alglib::samplemedian( + real_1d_array x, + double& median, + const xparams _params = alglib::xdefault); +void alglib::samplemedian( + real_1d_array x, + ae_int_t n, + double& median, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -4214,14 +4313,16 @@ double& mean, double& variance, double& skewness, - double& kurtosis); + double& kurtosis, + const xparams _params = alglib::xdefault); void alglib::samplemoments( real_1d_array x, ae_int_t n, double& mean, double& variance, double& skewness, - double& kurtosis); + double& kurtosis, + const xparams _params = alglib::xdefault);

    Examples:   [1]  
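The sketch below uses the short (length-inferred) overload of samplemoments() shown above on a small dataset. Only the mean (5.00 for this data) is stated; the other moments are printed as computed, since their exact values depend on the estimator conventions described in the full manual entry.

#include "stdafx.h"
#include <stdio.h>
#include "statistics.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_1d_array x = "[2,4,4,4,5,5,7,9]";
    double mean, variance, skewness, kurtosis;

    samplemoments(x, mean, variance, skewness, kurtosis);
    printf("mean=%.2f variance=%.2f skewness=%.2f kurtosis=%.2f\n",
           mean, variance, skewness, kurtosis); // mean=5.00 for this dataset
    return 0;
}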

    @@ -4243,12 +4344,17 @@ -- ALGLIB -- Copyright 01.03.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::samplepercentile(real_1d_array x, double p, double& v); +
    void alglib::samplepercentile( + real_1d_array x, + double p, + double& v, + const xparams _params = alglib::xdefault); void alglib::samplepercentile( real_1d_array x, ae_int_t n, double p, - double& v); + double& v, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -4272,8 +4378,13 @@ -- ALGLIB -- Copyright 06.09.2006 by Bochkanov Sergey *************************************************************************/ -
    double alglib::sampleskewness(real_1d_array x); -double alglib::sampleskewness(real_1d_array x, ae_int_t n); +
    double alglib::sampleskewness( + real_1d_array x, + const xparams _params = alglib::xdefault); +double alglib::sampleskewness( + real_1d_array x, + ae_int_t n, + const xparams _params = alglib::xdefault);
    @@ -4296,8 +4407,13 @@ -- ALGLIB -- Copyright 06.09.2006 by Bochkanov Sergey *************************************************************************/ -
    double alglib::samplevariance(real_1d_array x); -double alglib::samplevariance(real_1d_array x, ae_int_t n); +
    double alglib::samplevariance( + real_1d_array x, + const xparams _params = alglib::xdefault); +double alglib::samplevariance( + real_1d_array x, + ae_int_t n, + const xparams _params = alglib::xdefault);
    @@ -4319,11 +4435,15 @@ -- ALGLIB -- Copyright 09.04.2007 by Bochkanov Sergey *************************************************************************/ -
    double alglib::spearmancorr2(real_1d_array x, real_1d_array y); +
    double alglib::spearmancorr2( + real_1d_array x, + real_1d_array y, + const xparams _params = alglib::xdefault); double alglib::spearmancorr2( real_1d_array x, real_1d_array y, - ae_int_t n); + ae_int_t n, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -4332,24 +4452,18 @@
    /************************************************************************* Spearman's rank correlation matrix -SMP EDITION OF ALGLIB: - - ! This function can utilize multicore capabilities of your system. In - ! order to do this you have to call version with "smp_" prefix, which - ! indicates that multicore code will be used. - ! - ! This note is given for users of SMP edition; if you use GPL edition, - ! or commercial edition of ALGLIB without SMP support, you still will - ! be able to call smp-version of this function, but all computations - ! will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - ! - ! You should remember that starting/stopping worker thread always have - ! non-zero cost. Although multicore version is pretty efficient on - ! large problems, we do not recommend you to use it on small problems - - ! with correlation matrices smaller than 128*128. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - array[N,M], sample matrix: @@ -4368,18 +4482,16 @@ -- ALGLIB -- Copyright 28.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spearmancorrm(real_2d_array x, real_2d_array& c); -void alglib::spearmancorrm( +
    void alglib::spearmancorrm( real_2d_array x, - ae_int_t n, - ae_int_t m, - real_2d_array& c); -void alglib::smp_spearmancorrm(real_2d_array x, real_2d_array& c); -void alglib::smp_spearmancorrm( + real_2d_array& c, + const xparams _params = alglib::xdefault); +void alglib::spearmancorrm( real_2d_array x, ae_int_t n, ae_int_t m, - real_2d_array& c); + real_2d_array& c, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -4388,24 +4500,18 @@
    /************************************************************************* Spearman's rank cross-correlation matrix -SMP EDITION OF ALGLIB: - - ! This function can utilize multicore capabilities of your system. In - ! order to do this you have to call version with "smp_" prefix, which - ! indicates that multicore code will be used. - ! - ! This note is given for users of SMP edition; if you use GPL edition, - ! or commercial edition of ALGLIB without SMP support, you still will - ! be able to call smp-version of this function, but all computations - ! will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - ! - ! You should remember that starting/stopping worker thread always have - ! non-zero cost. Although multicore version is pretty efficient on - ! large problems, we do not recommend you to use it on small problems - - ! with correlation matrices smaller than 128*128. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: X - array[N,M1], sample matrix: @@ -4433,25 +4539,16 @@
    void alglib::spearmancorrm2( real_2d_array x, real_2d_array y, - real_2d_array& c); + real_2d_array& c, + const xparams _params = alglib::xdefault); void alglib::spearmancorrm2( real_2d_array x, real_2d_array y, ae_int_t n, ae_int_t m1, ae_int_t m2, - real_2d_array& c); -void alglib::smp_spearmancorrm2( - real_2d_array x, - real_2d_array y, - real_2d_array& c); -void alglib::smp_spearmancorrm2( - real_2d_array x, - real_2d_array y, - ae_int_t n, - ae_int_t m1, - ae_int_t m2, - real_2d_array& c); + real_2d_array& c, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -4466,7 +4563,8 @@
    double alglib::spearmanrankcorrelation( real_1d_array x, real_1d_array y, - ae_int_t n); + ae_int_t n, + const xparams _params = alglib::xdefault);
    @@ -4683,7 +4781,8 @@ double& pbl, double& par, double& pbr, - double& cve); + double& cve, + const xparams _params = alglib::xdefault); @@ -4725,7 +4824,8 @@ ae_int_t& info, double& threshold, double& rms, - double& cvrms); + double& cvrms, + const xparams _params = alglib::xdefault); @@ -4852,7 +4952,8 @@ real_2d_array& c, ae_int_t ncc, real_2d_array& vt, - ae_int_t ncvt); + ae_int_t ncvt, + const xparams _params = alglib::xdefault); @@ -4895,7 +4996,9 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::besseli0(double x); +
    double alglib::besseli0( + double x, + const xparams _params = alglib::xdefault);
    @@ -4921,7 +5024,9 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1985, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::besseli1(double x); +
    double alglib::besseli1( + double x, + const xparams _params = alglib::xdefault);
    @@ -4956,7 +5061,9 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::besselj0(double x); +
    double alglib::besselj0( + double x, + const xparams _params = alglib::xdefault);
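A trivial sanity check of the Cephes-derived Bessel routines: J0(0)=1 exactly, and J0(1) is roughly 0.7652. The specialfunctions.h header is an assumption about where these are declared, following the layout of the packaged sources.

#include "stdafx.h"
#include <stdio.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    printf("%.4f\n", besselj0(0.0)); // EXPECTED: 1.0000
    printf("%.4f\n", besselj0(1.0)); // roughly 0.7652
    return 0;
}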
    @@ -4981,7 +5088,9 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::besselj1(double x); +
    double alglib::besselj1( + double x, + const xparams _params = alglib::xdefault);
    @@ -5013,7 +5122,10 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::besseljn(ae_int_t n, double x); +
    double alglib::besseljn( + ae_int_t n, + double x, + const xparams _params = alglib::xdefault);
    @@ -5039,7 +5151,9 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::besselk0(double x); +
    double alglib::besselk0( + double x, + const xparams _params = alglib::xdefault);
    @@ -5063,7 +5177,9 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::besselk1(double x); +
    double alglib::besselk1( + double x, + const xparams _params = alglib::xdefault);
    @@ -5090,7 +5206,10 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1988, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::besselkn(ae_int_t nn, double x); +
    double alglib::besselkn( + ae_int_t nn, + double x, + const xparams _params = alglib::xdefault);
    @@ -5123,7 +5242,9 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::bessely0(double x); +
    double alglib::bessely0( + double x, + const xparams _params = alglib::xdefault);
    @@ -5149,7 +5270,9 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::bessely1(double x); +
    double alglib::bessely1( + double x, + const xparams _params = alglib::xdefault);
    @@ -5176,7 +5299,10 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::besselyn(ae_int_t n, double x); +
    double alglib::besselyn( + ae_int_t n, + double x, + const xparams _params = alglib::xdefault);
    @@ -5210,7 +5336,10 @@ Cephes Math Library Release 2.0: April, 1987 Copyright 1984, 1987 by Stephen L. Moshier *************************************************************************/ -
    double alglib::beta(double a, double b); +
    double alglib::beta( + double a, + double b, + const xparams _params = alglib::xdefault);
    @@ -5257,7 +5386,11 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::binomialcdistribution(ae_int_t k, ae_int_t n, double p); +
    double alglib::binomialcdistribution( + ae_int_t k, + ae_int_t n, + double p, + const xparams _params = alglib::xdefault);
    @@ -5293,7 +5426,11 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::binomialdistribution(ae_int_t k, ae_int_t n, double p); +
    double alglib::binomialdistribution( + ae_int_t k, + ae_int_t n, + double p, + const xparams _params = alglib::xdefault);
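The binomial CDF above sums the terms 0 through k of the binomial density, so for two fair coin flips P(X<=1) = 0.25 + 0.50 = 0.75, which makes an easy check (same header assumption as in the Bessel sketch).

#include "stdafx.h"
#include <stdio.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // P(X <= 1) for X ~ Binomial(n=2, p=0.5)
    printf("%.2f\n", binomialdistribution(1, 2, 0.5)); // EXPECTED: 0.75
    return 0;
}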
    @@ -5326,7 +5463,11 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::invbinomialdistribution(ae_int_t k, ae_int_t n, double y); +
    double alglib::invbinomialdistribution( + ae_int_t k, + ae_int_t n, + double y, + const xparams _params = alglib::xdefault);
    @@ -5353,7 +5494,11 @@ Result: the value of the Chebyshev polynomial at x *************************************************************************/ -
    double alglib::chebyshevcalculate(ae_int_t r, ae_int_t n, double x); +
    double alglib::chebyshevcalculate( + ae_int_t r, + ae_int_t n, + double x, + const xparams _params = alglib::xdefault);
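A quick sketch for the Chebyshev evaluator, assuming r=1 selects the first kind as the parameter description suggests: T2(x) = 2*x^2 - 1, so T2(0.5) = -0.5. Header assumption: specialfunctions.h, as above.

#include "stdafx.h"
#include <stdio.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // First-kind Chebyshev polynomial of degree 2 evaluated at x = 0.5
    printf("%.2f\n", chebyshevcalculate(1, 2, 0.5)); // EXPECTED: -0.50
    return 0;
}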
    @@ -5367,13 +5512,16 @@ Output parameters: C - coefficients *************************************************************************/ -
    void alglib::chebyshevcoefficients(ae_int_t n, real_1d_array& c); +
    void alglib::chebyshevcoefficients( + ae_int_t n, + real_1d_array& c, + const xparams _params = alglib::xdefault);
     
    /************************************************************************* -Summation of Chebyshev polynomials using Clenshaw’s recurrence formula. +Summation of Chebyshev polynomials using Clenshaw's recurrence formula. This routine calculates c[0]*T0(x) + c[1]*T1(x) + ... + c[N]*TN(x) @@ -5393,7 +5541,8 @@ real_1d_array c, ae_int_t r, ae_int_t n, - double x); + double x, + const xparams _params = alglib::xdefault);
    @@ -5411,7 +5560,11 @@ Output parameters B - power series coefficients *************************************************************************/ -
    void alglib::fromchebyshev(real_1d_array a, ae_int_t n, real_1d_array& b); +
    void alglib::fromchebyshev( + real_1d_array a, + ae_int_t n, + real_1d_array& b, + const xparams _params = alglib::xdefault);
    @@ -5456,7 +5609,10 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::chisquarecdistribution(double v, double x); +
    double alglib::chisquarecdistribution( + double v, + double x, + const xparams _params = alglib::xdefault);
    @@ -5494,7 +5650,10 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::chisquaredistribution(double v, double x); +
    double alglib::chisquaredistribution( + double v, + double x, + const xparams _params = alglib::xdefault);
    @@ -5519,7 +5678,10 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::invchisquaredistribution(double v, double y); +
    double alglib::invchisquaredistribution( + double v, + double y, + const xparams _params = alglib::xdefault);
    @@ -5541,6 +5703,7 @@ clusterizersetkmeansinit
    clusterizersetkmeanslimits
    clusterizersetpoints
    +clusterizersetseed
    @@ -5745,7 +5908,9 @@ -- ALGLIB -- Copyright 10.07.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::clusterizercreate(clusterizerstate& s); +
    void alglib::clusterizercreate( + clusterizerstate& s, + const xparams _params = alglib::xdefault);

    Examples:   [1]  [2]  [3]  [4]  [5]  

    @@ -5754,18 +5919,14 @@
    /************************************************************************* This function returns distance matrix for dataset -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Agglomerative hierarchical clustering algorithm has two phases: - ! distance matrix calculation and clustering itself. Only first phase - ! (distance matrix calculation) is accelerated by Intel MKL and multi- - ! threading. Thus, acceleration is significant only for medium or high- - ! dimensional problems. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -5814,13 +5975,8 @@ ae_int_t npoints, ae_int_t nfeatures, ae_int_t disttype, - real_2d_array& d); -void alglib::smp_clusterizergetdistances( - real_2d_array xy, - ae_int_t npoints, - ae_int_t nfeatures, - ae_int_t disttype, - real_2d_array& d); + real_2d_array& d, + const xparams _params = alglib::xdefault);
    @@ -5872,7 +6028,8 @@ ahcreport rep, ae_int_t k, integer_1d_array& cidx, - integer_1d_array& cz); + integer_1d_array& cz, + const xparams _params = alglib::xdefault);

    Examples:   [1]  [2]  

    @@ -5881,23 +6038,29 @@
    /************************************************************************* This function performs agglomerative hierarchical clustering -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Agglomerative hierarchical clustering algorithm has two phases: - ! distance matrix calculation and clustering itself. Only first phase - ! (distance matrix calculation) is accelerated by Intel MKL and multi- - ! threading. Thus, acceleration is significant only for medium or high- - ! dimensional problems. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. +NOTE: Agglomerative hierarchical clustering algorithm has two phases: + distance matrix calculation and clustering itself. Only first phase + (distance matrix calculation) is accelerated by Intel MKL and + multithreading. Thus, acceleration is significant only for medium or + high-dimensional problems. + + Although activating multithreading gives some speedup over single- + threaded execution, you should not expect nearly-linear scaling + with respect to cores count. + INPUT PARAMETERS: S - clusterizer state, initialized by ClusterizerCreate() @@ -5917,8 +6080,10 @@ -- ALGLIB -- Copyright 10.07.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::clusterizerrunahc(clusterizerstate s, ahcreport& rep); -void alglib::smp_clusterizerrunahc(clusterizerstate s, ahcreport& rep); +
    void alglib::clusterizerrunahc( + clusterizerstate s, + ahcreport& rep, + const xparams _params = alglib::xdefault);

    Examples:   [1]  [2]  [3]  [4]  [5]  

    @@ -5934,27 +6099,26 @@ By default, one restart and unlimited number of iterations are used. Initialization algorithm is chosen automatically. -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (can be used from C# and C++) - ! * access to high-performance C++ core (actual for C# users) - ! - ! K-means clustering algorithm has two phases: selection of initial - ! centers and clustering itself. ALGLIB parallelizes both phases. - ! Parallel version is optimized for the following scenario: medium or - ! high-dimensional problem (20 or more dimensions) with large number of - ! points and clusters. However, some speed-up can be obtained even when - ! assumptions above are violated. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! As for native-vs-managed comparison, working with native core brings - ! 30-40% improvement in speed over pure C# version of ALGLIB. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. +NOTE: k-means clustering algorithm has two phases: selection of initial + centers and clustering itself. ALGLIB parallelizes both phases. + Parallel version is optimized for the following scenario: medium or + high-dimensional problem (8 or more dimensions) with large number of + points and clusters. However, some speed-up can be obtained even + when assumptions above are violated. + INPUT PARAMETERS: S - clusterizer state, initialized by ClusterizerCreate() K - number of clusters, K>=0. @@ -5976,17 +6140,19 @@ to clusterizer with DistType other than Euclidean (or dataset was specified by distance matrix instead of explicitly given points). +NOTE 2: by default, k-means uses non-deterministic seed to initialize RNG + which is used to select initial centers. As result, each run of + algorithm may return different values. If you need deterministic + behavior, use ClusterizerSetSeed() function. + -- ALGLIB -- Copyright 10.07.2012 by Bochkanov Sergey *************************************************************************/
    void alglib::clusterizerrunkmeans( clusterizerstate s, ae_int_t k, - kmeansreport& rep); -void alglib::smp_clusterizerrunkmeans( - clusterizerstate s, - ae_int_t k, - kmeansreport& rep); + kmeansreport& rep, + const xparams _params = alglib::xdefault);
    @@ -6045,7 +6211,8 @@ double r, ae_int_t& k, integer_1d_array& cidx, - integer_1d_array& cz); + integer_1d_array& cz, + const xparams _params = alglib::xdefault); @@ -6104,7 +6271,8 @@ double r, ae_int_t& k, integer_1d_array& cidx, - integer_1d_array& cz); + integer_1d_array& cz, + const xparams _params = alglib::xdefault); @@ -6132,7 +6300,10 @@ -- ALGLIB -- Copyright 10.07.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::clusterizersetahcalgo(clusterizerstate s, ae_int_t algo); +
    void alglib::clusterizersetahcalgo( + clusterizerstate s, + ae_int_t algo, + const xparams _params = alglib::xdefault);

    Examples:   [1]  [2]  [3]  [4]  [5]  

    @@ -6170,12 +6341,14 @@
    void alglib::clusterizersetdistances( clusterizerstate s, real_2d_array d, - bool isupper); + bool isupper, + const xparams _params = alglib::xdefault); void alglib::clusterizersetdistances( clusterizerstate s, real_2d_array d, ae_int_t npoints, - bool isupper); + bool isupper, + const xparams _params = alglib::xdefault);

    Examples:   [1]  

    @@ -6208,7 +6381,8 @@ *************************************************************************/
    void alglib::clusterizersetkmeansinit( clusterizerstate s, - ae_int_t initalgo); + ae_int_t initalgo, + const xparams _params = alglib::xdefault);
    @@ -6232,7 +6406,8 @@
    void alglib::clusterizersetkmeanslimits( clusterizerstate s, ae_int_t restarts, - ae_int_t maxits); + ae_int_t maxits, + const xparams _params = alglib::xdefault);
    @@ -6289,16 +6464,43 @@
    void alglib::clusterizersetpoints( clusterizerstate s, real_2d_array xy, - ae_int_t disttype); + ae_int_t disttype, + const xparams _params = alglib::xdefault); void alglib::clusterizersetpoints( clusterizerstate s, real_2d_array xy, ae_int_t npoints, ae_int_t nfeatures, - ae_int_t disttype); + ae_int_t disttype, + const xparams _params = alglib::xdefault);

    Examples:   [1]  [2]  [3]  [4]  [5]  

    + +
    +
/************************************************************************* +This function sets the seed which is used to initialize the internal RNG. +By default, a deterministic seed is used - the same for each run of the +clusterizer. If you specify a non-deterministic seed value, then some +algorithms which depend on random initialization (in current version: +k-means) may return slightly different results after each run. + +INPUT PARAMETERS: + S - clusterizer state, initialized by ClusterizerCreate() + Seed - seed: + * positive values = use deterministic seed for each run of + algorithms which depend on random initialization + * zero or negative values = use non-deterministic seed + + -- ALGLIB -- + Copyright 08.06.2017 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::clusterizersetseed( + clusterizerstate s, + ae_int_t seed, + const xparams _params = alglib::xdefault); + +
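To show how the new clusterizersetseed() entry fits into the usual k-means workflow (create, set points, run), here is a minimal sketch. The dataanalysis.h include mirrors the packaged clustering examples and is an assumption about your setup; the exact cluster labels printed depend on the initialization.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Four 1-D points forming two well-separated groups.
    clusterizerstate s;
    kmeansreport rep;
    real_2d_array xy = "[[1],[1.1],[4],[4.1]]";

    clusterizercreate(s);
    clusterizersetpoints(s, xy, 2);   // disttype=2: Euclidean distance (required by k-means)
    clusterizersetseed(s, 1);         // positive seed => deterministic result across runs
    clusterizerrunkmeans(s, 2, rep);  // K=2 clusters
    printf("%s\n", rep.cidx.tostring().c_str()); // e.g. [0,0,1,1] (labels may be swapped)
    return 0;
}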
     #include "stdafx.h"
    @@ -6711,7 +6913,8 @@
         ae_int_t m,
         complex_1d_array b,
         ae_int_t n,
    -    complex_1d_array& r);
    +    complex_1d_array& r,
    +    const xparams _params = alglib::xdefault);
     
     
    @@ -6749,7 +6952,8 @@ ae_int_t m, complex_1d_array r, ae_int_t n, - complex_1d_array& c); + complex_1d_array& c, + const xparams _params = alglib::xdefault); @@ -6785,7 +6989,8 @@ ae_int_t m, complex_1d_array b, ae_int_t n, - complex_1d_array& r); + complex_1d_array& r, + const xparams _params = alglib::xdefault); @@ -6821,7 +7026,8 @@ ae_int_t m, complex_1d_array b, ae_int_t n, - complex_1d_array& r); + complex_1d_array& r, + const xparams _params = alglib::xdefault); @@ -6853,7 +7059,8 @@ ae_int_t m, real_1d_array b, ae_int_t n, - real_1d_array& r); + real_1d_array& r, + const xparams _params = alglib::xdefault); @@ -6885,7 +7092,8 @@ ae_int_t m, real_1d_array r, ae_int_t n, - real_1d_array& c); + real_1d_array& c, + const xparams _params = alglib::xdefault); @@ -6921,7 +7129,8 @@ ae_int_t m, real_1d_array b, ae_int_t n, - real_1d_array& r); + real_1d_array& r, + const xparams _params = alglib::xdefault); @@ -6957,7 +7166,8 @@ ae_int_t m, real_1d_array b, ae_int_t n, - real_1d_array& r); + real_1d_array& r, + const xparams _params = alglib::xdefault); @@ -7013,7 +7223,8 @@ ae_int_t n, complex_1d_array pattern, ae_int_t m, - complex_1d_array& r); + complex_1d_array& r, + const xparams _params = alglib::xdefault); @@ -7050,7 +7261,8 @@ ae_int_t m, complex_1d_array pattern, ae_int_t n, - complex_1d_array& c); + complex_1d_array& c, + const xparams _params = alglib::xdefault); @@ -7096,7 +7308,8 @@ ae_int_t n, real_1d_array pattern, ae_int_t m, - real_1d_array& r); + real_1d_array& r, + const xparams _params = alglib::xdefault); @@ -7133,7 +7346,8 @@ ae_int_t m, real_1d_array pattern, ae_int_t n, - real_1d_array& c); + real_1d_array& c, + const xparams _params = alglib::xdefault); @@ -7187,7 +7401,8 @@ ae_int_t n, double& bothtails, double& lefttail, - double& righttail); + double& righttail, + const xparams _params = alglib::xdefault); @@ -7235,7 +7450,8 @@ ae_int_t n, double& bothtails, double& lefttail, - double& righttail); + double& righttail, + const xparams _params = alglib::xdefault); @@ -7263,7 +7479,8 @@ ae_int_t restarts, ae_int_t& info, real_2d_array& c, - integer_1d_array& xyc); + integer_1d_array& xyc, + const xparams _params = alglib::xdefault); @@ -7300,1462 +7517,1505 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::dawsonintegral(double x); +
    double alglib::dawsonintegral( + double x, + const xparams _params = alglib::xdefault);
    - +
    -densesolverlsreport
    -densesolverreport
    +decisionforest
    +decisionforestbuffer
    +decisionforestbuilder
    +dfreport
    -cmatrixlusolve
    -cmatrixlusolvefast
    -cmatrixlusolvem
    -cmatrixlusolvemfast
    -cmatrixmixedsolve
    -cmatrixmixedsolvem
    -cmatrixsolve
    -cmatrixsolvefast
    -cmatrixsolvem
    -cmatrixsolvemfast
    -hpdmatrixcholeskysolve
    -hpdmatrixcholeskysolvefast
    -hpdmatrixcholeskysolvem
    -hpdmatrixcholeskysolvemfast
    -hpdmatrixsolve
    -hpdmatrixsolvefast
    -hpdmatrixsolvem
    -hpdmatrixsolvemfast
    -rmatrixlusolve
    -rmatrixlusolvefast
    -rmatrixlusolvem
    -rmatrixlusolvemfast
    -rmatrixmixedsolve
    -rmatrixmixedsolvem
    -rmatrixsolve
    -rmatrixsolvefast
    -rmatrixsolvels
    -rmatrixsolvem
    -rmatrixsolvemfast
    -spdmatrixcholeskysolve
    -spdmatrixcholeskysolvefast
    -spdmatrixcholeskysolvem
    -spdmatrixcholeskysolvemfast
    -spdmatrixsolve
    -spdmatrixsolvefast
    -spdmatrixsolvem
    -spdmatrixsolvemfast
    +dfavgce
    +dfavgerror
    +dfavgrelerror
    +dfbinarycompression
    +dfbuilderbuildrandomforest
    +dfbuildercreate
    +dfbuildergetprogress
    +dfbuilderpeekprogress
    +dfbuildersetdataset
    +dfbuildersetimportancenone
    +dfbuildersetimportanceoobgini
    +dfbuildersetimportancepermutation
    +dfbuildersetimportancetrngini
    +dfbuildersetrdfalgo
    +dfbuildersetrdfsplitstrength
    +dfbuildersetrndvars
    +dfbuildersetrndvarsauto
    +dfbuildersetrndvarsratio
    +dfbuildersetseed
    +dfbuildersetsubsampleratio
    +dfbuildrandomdecisionforest
    +dfbuildrandomdecisionforestx1
    +dfclassify
    +dfcreatebuffer
    +dfprocess
    +dfprocess0
    +dfprocessi
    +dfrelclserror
    +dfrmserror
    +dfserialize
    +dftsprocess
    +dfunserialize
    clst_ahc Simple hierarchical clusterization with Euclidean distance function
+ +
    randomforest_cls Simple classification with random forests
randomforest_reg Simple regression with decision forest
- +
     
    /************************************************************************* - +Decision forest (random forest) model. *************************************************************************/ -
    class densesolverlsreport +
    class decisionforest { - double r2; - real_2d_array cx; - ae_int_t n; - ae_int_t k; };
- +
     
    /************************************************************************* +Buffer object which is used to perform various requests (usually model +inference) in the multithreaded mode (multiple threads working with same +DF object). +This object should be created with DFCreateBuffer(). *************************************************************************/ -
    class densesolverreport +
    class decisionforestbuffer { - double r1; - double rinf; };
- +
     
    /************************************************************************* -Complex dense linear solver for A*x=b with complex N*N A given by its LU -decomposition and N*1 vectors x and b. This is "slow-but-robust" version -of the complex linear solver with additional features which add -significant performance overhead. Faster version is CMatrixLUSolveFast() -function. +A random forest (decision forest) builder object. -Algorithm features: -* automatic detection of degenerate cases -* O(N^2) complexity -* condition number estimation +Used to store dataset and specify decision forest training algorithm settings. +*************************************************************************/ +
    class decisionforestbuilder +{ +}; -No iterative refinement is provided because exact form of original matrix -is not known to subroutine. Use CMatrixSolve or CMatrixMixedSolve if you -need iterative refinement. +
+ +
+
    /************************************************************************* +Decision forest training report. -IMPORTANT: ! this function is NOT the most efficient linear solver provided - ! by ALGLIB. It estimates condition number of linear system, - ! which results in 10-15x performance penalty when compared - ! with "fast" version which just calls triangular solver. - ! - ! This performance penalty is insignificant when compared with - ! cost of large LU decomposition. However, if you call this - ! function many times for the same left side, this overhead - ! BECOMES significant. It also becomes significant for small- - ! scale problems. - ! - ! In such cases we strongly recommend you to use faster solver, - ! CMatrixLUSolveFast() function. +=== training/oob errors ================================================== -INPUT PARAMETERS - LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU result - P - array[0..N-1], pivots array, CMatrixLU result - N - size of A - B - array[0..N-1], right part +Following fields store training set errors: +* relclserror - fraction of misclassified cases, [0,1] +* avgce - average cross-entropy in bits per symbol +* rmserror - root-mean-square error +* avgerror - average error +* avgrelerror - average relative error + +Out-of-bag estimates are stored in fields with same names, but "oob" prefix. + +For classification problems: +* RMS, AVG and AVGREL errors are calculated for posterior probabilities + +For regression problems: +* RELCLS and AVGCE errors are zero + +=== variable importance ================================================== + +Following fields are used to store variable importance information: + +* topvars - variables ordered from the most important to + less important ones (according to current + choice of importance raiting). + For example, topvars[0] contains index of the + most important variable, and topvars[0:2] are + indexes of 3 most important ones and so on. + +* varimportances - array[nvars], ratings (the larger, the more + important the variable is, always in [0,1] + range). + By default, filled by zeros (no importance + ratings are provided unless you explicitly + request them). + Zero rating means that variable is not important, + however you will rarely encounter such a thing, + in many cases unimportant variables produce + nearly-zero (but nonzero) ratings. + +Variable importance report must be EXPLICITLY requested by calling: +* dfbuildersetimportancegini() function, if you need out-of-bag Gini-based + importance rating also known as MDI (fast to calculate, resistant to + overfitting issues, but has some bias towards continuous and + high-cardinality categorical variables) +* dfbuildersetimportancetrngini() function, if you need training set Gini- + -based importance rating (what other packages typically report). +* dfbuildersetimportancepermutation() function, if you need permutation- + based importance rating also known as MDA (slower to calculate, but less + biased) +* dfbuildersetimportancenone() function, if you do not need importance + ratings - ratings will be zero, topvars[] will be [0,1,2,...] + +Different importance ratings (Gini or permutation) produce non-comparable +values. Although in all cases rating values lie in [0,1] range, there are +exist differences: +* informally speaking, Gini importance rating tends to divide "unit amount + of importance" between several important variables, i.e. it produces + estimates which roughly sum to 1.0 (or less than 1.0, if your task can + not be solved exactly). 
If all variables are equally important, they + will have same rating, roughly 1/NVars, even if every variable is + critically important. +* from the other side, permutation importance tells us what percentage of + the model predictive power will be ruined by permuting this specific + variable. It does not produce estimates which sum to one. Critically + important variable will have rating close to 1.0, and you may have + multiple variables with such a rating. -OUTPUT PARAMETERS - Info - return code: - * -3 matrix is very badly conditioned or exactly singular. - * -1 N<=0 was passed - * 1 task is solved (but matrix A may be ill-conditioned, - check R1/RInf parameters for condition numbers). - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - X - array[N], it contains: - * info>0 => solution - * info=-3 => filled by zeros +More information on variable importance ratings can be found in comments +on the dfbuildersetimportancegini() and dfbuildersetimportancepermutation() +functions. +*************************************************************************/ +
    class dfreport +{ + double relclserror; + double avgce; + double rmserror; + double avgerror; + double avgrelerror; + double oobrelclserror; + double oobavgce; + double oobrmserror; + double oobavgerror; + double oobavgrelerror; + integer_1d_array topvars; + real_1d_array varimportances; +}; + +
+ +
+
    /************************************************************************* +Average cross-entropy (in bits per element) on the test set + +INPUT PARAMETERS: + DF - decision forest model + XY - test set + NPoints - test set size + +RESULT: + CrossEntropy/(NPoints*LN(2)). + Zero if model solves regression task. -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 16.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixlusolve( - complex_2d_array lua, - integer_1d_array p, - ae_int_t n, - complex_1d_array b, - ae_int_t& info, - densesolverreport& rep, - complex_1d_array& x); +
    double alglib::dfavgce( + decisionforest df, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
- +
     
    /************************************************************************* -Complex dense linear solver for A*x=b with N*N complex A given by its LU -decomposition and N*1 vectors x and b. This is fast lightweight version -of solver, which is significantly faster than CMatrixLUSolve(), but does -not provide additional information (like condition numbers). +Average error on the test set -Algorithm features: -* O(N^2) complexity -* no additional time-consuming features, just triangular solver +INPUT PARAMETERS: + DF - decision forest model + XY - test set + NPoints - test set size -INPUT PARAMETERS - LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU result - P - array[0..N-1], pivots array, CMatrixLU result - N - size of A - B - array[0..N-1], right part +RESULT: + Its meaning for regression task is obvious. As for + classification task, it means average error when estimating posterior + probabilities. -OUTPUT PARAMETERS - Info - return code: - * -3 matrix is exactly singular (ill conditioned matrices - are not recognized). - * -1 N<=0 was passed - * 1 task is solved - B - array[N]: - * info>0 => overwritten by solution - * info=-3 => filled by zeros + -- ALGLIB -- + Copyright 16.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::dfavgerror( + decisionforest df, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); -NOTE: unlike CMatrixLUSolve(), this function does NOT check for - near-degeneracy of input matrix. It checks for EXACT degeneracy, - because this check is easy to do. However, very badly conditioned - matrices may went unnoticed. +
+ +
+
    /************************************************************************* +Average relative error on the test set +INPUT PARAMETERS: + DF - decision forest model + XY - test set + NPoints - test set size + +RESULT: + Its meaning for regression task is obvious. As for + classification task, it means average relative error when estimating + posterior probability of belonging to the correct class. -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 16.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixlusolvefast( - complex_2d_array lua, - integer_1d_array p, - ae_int_t n, - complex_1d_array& b, - ae_int_t& info); +
    double alglib::dfavgrelerror( + decisionforest df, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
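A short sketch showing how the three test-set metrics above can be evaluated together. It assumes a decision forest df trained as in the dfbuilderbuildrandomforest() sketch further below, and a held-out set xytest laid out like the training data.

#include "stdafx.h"
#include <cstdio>
#include "dataanalysis.h"

using namespace alglib;

// df is a trained decision forest, xytest is a test set with ntest rows
void report_test_errors(const decisionforest &df, const real_2d_array &xytest, ae_int_t ntest)
{
    printf("avg cross-entropy = %.6f\n", dfavgce(df, xytest, ntest));
    printf("avg error         = %.6f\n", dfavgerror(df, xytest, ntest));
    printf("avg rel. error    = %.6f\n", dfavgrelerror(df, xytest, ntest));
}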
- +
     
    /************************************************************************* -Dense solver for A*X=B with N*N complex A given by its LU decomposition, -and N*M matrices X and B (multiple right sides). "Slow-but-feature-rich" -version of the solver. +This function performs binary compression of the decision forest. -Algorithm features: -* automatic detection of degenerate cases -* O(M*N^2) complexity -* condition number estimation +Original decision forest produced by the forest builder is stored using +64-bit representation for all numbers - offsets, variable indexes, split +points. -No iterative refinement is provided because exact form of original matrix -is not known to subroutine. Use CMatrixSolve or CMatrixMixedSolve if you -need iterative refinement. +It is possible to significantly reduce model size by means of: +* using compressed dynamic encoding for integers (offsets and variable + indexes), which uses just 1 byte to store small ints (less than 128), + just 2 bytes for larger values (less than 128^2) and so on +* storing floating point numbers using 8-bit exponent and 16-bit mantissa -IMPORTANT: ! this function is NOT the most efficient linear solver provided - ! by ALGLIB. It estimates condition number of linear system, - ! which results in significant performance penalty when - ! compared with "fast" version which just calls triangular - ! solver. - ! - ! This performance penalty is especially apparent when you use - ! ALGLIB parallel capabilities (condition number estimation is - ! inherently sequential). It also becomes significant for - ! small-scale problems. - ! - ! In such cases we strongly recommend you to use faster solver, - ! CMatrixLUSolveMFast() function. +As result, model needs significantly less memory (compression factor +depends on variable and class counts). In particular: +* NVars<128 and NClasses<128 result in 4.4x-5.7x model size reduction +* NVars<16384 and NClasses<128 result in 3.7x-4.5x model size reduction -COMMERCIAL EDITION OF ALGLIB: +Such storage format performs lossless compression of all integers, but +compression of floating point values (split values) is lossy, with roughly +0.01% relative error introduced during rounding. Thus, we recommend you to +re-evaluate model accuracy after compression. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Triangular solver is relatively easy to parallelize. - ! However, parallelization will be efficient only for large number of - ! right parts M. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! 
* call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +Another downside of compression is ~1.5x reduction in the inference +speed due to necessity of dynamic decompression of the compressed model. -INPUT PARAMETERS - LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result - P - array[0..N-1], pivots array, RMatrixLU result - N - size of A - B - array[0..N-1,0..M-1], right part - M - right part size +INPUT PARAMETERS: + DF - decision forest built by forest builder -OUTPUT PARAMETERS - Info - return code: - * -3 matrix is very badly conditioned or exactly singular. - * -1 N<=0 was passed - * 1 task is solved (but matrix A may be ill-conditioned, - check R1/RInf parameters for condition numbers). - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - X - array[N,M], it contains: - * info>0 => solution - * info=-3 => filled by zeros +OUTPUT PARAMETERS: + DF - replaced by compressed forest + +RESULT: + compression factor (in-RAM size of the compressed model vs than of the + uncompressed one), positive number larger than 1.0 -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 22.07.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixlusolvem( - complex_2d_array lua, - integer_1d_array p, - ae_int_t n, - complex_2d_array b, - ae_int_t m, - ae_int_t& info, - densesolverreport& rep, - complex_2d_array& x); -void alglib::smp_cmatrixlusolvem( - complex_2d_array lua, - integer_1d_array p, - ae_int_t n, - complex_2d_array b, - ae_int_t m, - ae_int_t& info, - densesolverreport& rep, - complex_2d_array& x); +
    double alglib::dfbinarycompression( + decisionforest df, + const xparams _params = alglib::xdefault);
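A minimal sketch of the compression step, assuming df was produced by dfbuilderbuildrandomforest(); since split values are rounded, re-checking accuracy on a held-out set afterwards is left to the caller.

#include "stdafx.h"
#include <cstdio>
#include "dataanalysis.h"

using namespace alglib;

void compress_forest(decisionforest &df)
{
    // df is replaced in-place by its compressed version
    double factor = dfbinarycompression(df);
    printf("model is now %.1fx smaller in RAM\n", factor);
}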
- +
     
    /************************************************************************* -Dense solver for A*X=B with N*N complex A given by its LU decomposition, -and N*M matrices X and B (multiple right sides). "Fast-but-lightweight" -version of the solver. +This subroutine builds decision forest according to current settings using +dataset internally stored in the builder object. Dense algorithm is used. -Algorithm features: -* O(M*N^2) complexity -* no additional time-consuming features +NOTE: this function uses dense algorithm for forest construction + independently from the dataset format (dense or sparse). -COMMERCIAL EDITION OF ALGLIB: +NOTE: forest built with this function is stored in-memory using 64-bit + data structures for offsets/indexes/split values. It is possible to + convert forest into more memory-efficient compressed binary + representation. Depending on the problem properties, 3.7x-5.7x + compression factors are possible. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + The downsides of compression are (a) slight reduction in the model + accuracy and (b) ~1.5x reduction in the inference speed (due to + increased complexity of the storage format). + + See comments on dfbinarycompression() for more info. + +Default settings are used by the algorithm; you can tweak them with the +help of the following functions: +* dfbuildersetrfactor() - to control a fraction of the dataset used for + subsampling +* dfbuildersetrandomvars() - to control number of variables randomly chosen + for decision rule creation + + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Triangular solver is relatively easy to parallelize. - ! However, parallelization will be efficient only for large number of - ! right parts M. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. 
-INPUT PARAMETERS - LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result - P - array[0..N-1], pivots array, RMatrixLU result - N - size of A - B - array[0..N-1,0..M-1], right part - M - right part size +INPUT PARAMETERS: + S - decision forest builder object + NTrees - NTrees>=1, number of trees to train -OUTPUT PARAMETERS - Info - return code: - * -3 matrix is exactly singular (ill conditioned matrices - are not recognized). - * -1 N<=0 was passed - * 1 task is solved - B - array[N,M]: - * info>0 => overwritten by solution - * info=-3 => filled by zeros +OUTPUT PARAMETERS: + DF - decision forest. You can compress this forest to more + compact 16-bit representation with dfbinarycompression() + Rep - report, see below for information on its fields. + +=== report information produced by forest construction function ========== +Decision forest training report includes following information: +* training set errors +* out-of-bag estimates of errors +* variable importance ratings + +Following fields are used to store information: +* training set errors are stored in rep.relclserror, rep.avgce, rep.rmserror, + rep.avgerror and rep.avgrelerror +* out-of-bag estimates of errors are stored in rep.oobrelclserror, rep.oobavgce, + rep.oobrmserror, rep.oobavgerror and rep.oobavgrelerror + +Variable importance reports, if requested by dfbuildersetimportancegini(), +dfbuildersetimportancetrngini() or dfbuildersetimportancepermutation() +call, are stored in: +* rep.varimportances field stores importance ratings +* rep.topvars stores variable indexes ordered from the most important to + less important ones + +You can find more information about report fields in: +* comments on dfreport structure +* comments on dfbuildersetimportancegini function +* comments on dfbuildersetimportancetrngini function +* comments on dfbuildersetimportancepermutation function -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixlusolvemfast( - complex_2d_array lua, - integer_1d_array p, - ae_int_t n, - complex_2d_array& b, - ae_int_t m, - ae_int_t& info); -void alglib::smp_cmatrixlusolvemfast( - complex_2d_array lua, - integer_1d_array p, - ae_int_t n, - complex_2d_array& b, - ae_int_t m, - ae_int_t& info); +
    void alglib::dfbuilderbuildrandomforest( + decisionforestbuilder s, + ae_int_t ntrees, + decisionforest& df, + dfreport& rep, + const xparams _params = alglib::xdefault);
- +
Examples:  [1]  [2]
+
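A compact end-to-end sketch of the workflow described above: create the builder, attach a dataset, build the forest and read the report. The toy dataset and tree count are illustrative; dfprocess() is taken from the function index above.

#include "stdafx.h"
#include <cstdio>
#include "dataanalysis.h"

using namespace alglib;

int main()
{
    // toy classification set: 2 features, class label in the last column
    real_2d_array xy = "[[0,0,0],[0,1,0],[1,0,1],[1,1,1]]";
    ae_int_t npoints = 4, nvars = 2, nclasses = 2;

    decisionforestbuilder builder;
    dfbuildercreate(builder);
    dfbuildersetdataset(builder, xy, npoints, nvars, nclasses);

    decisionforest df;
    dfreport rep;
    dfbuilderbuildrandomforest(builder, 50, df, rep);   // 50 trees

    printf("training rel.cls.error = %.4f\n", rep.relclserror);
    printf("OOB      rel.cls.error = %.4f\n", rep.oobrelclserror);

    // inference on a single point: y[i] = posterior probability of class i
    real_1d_array x = "[1,0]", y;
    dfprocess(df, x, y);
    printf("P(class=1|x) = %.3f\n", y[1]);
    return 0;
}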
     
    /************************************************************************* -Dense solver. Same as RMatrixMixedSolve(), but for complex matrices. +This subroutine creates DecisionForestBuilder object which is used to +train decision forests. -Algorithm features: -* automatic detection of degenerate cases -* condition number estimation -* iterative refinement -* O(N^2) complexity +By default, new builder stores empty dataset and some reasonable default +settings. At the very least, you should specify dataset prior to building +decision forest. You can also tweak settings of the forest construction +algorithm (recommended, although default setting should work well). -INPUT PARAMETERS - A - array[0..N-1,0..N-1], system matrix - LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU result - P - array[0..N-1], pivots array, CMatrixLU result - N - size of A - B - array[0..N-1], right part +Following actions are mandatory: +* calling dfbuildersetdataset() to specify dataset +* calling dfbuilderbuildrandomforest() to build decision forest using + current dataset and default settings -OUTPUT PARAMETERS - Info - return code: - * -3 matrix is very badly conditioned or exactly singular. - * -1 N<=0 was passed - * 1 task is solved (but matrix A may be ill-conditioned, - check R1/RInf parameters for condition numbers). - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - X - array[N], it contains: - * info>0 => solution - * info=-3 => filled by zeros +Additionally, you may call: +* dfbuildersetrndvars() or dfbuildersetrndvarsratio() to specify number of + variables randomly chosen for each split +* dfbuildersetsubsampleratio() to specify fraction of the dataset randomly + subsampled to build each tree +* dfbuildersetseed() to control random seed chosen for tree construction + +INPUT PARAMETERS: + none + +OUTPUT PARAMETERS: + S - decision forest builder -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixmixedsolve( - complex_2d_array a, - complex_2d_array lua, - integer_1d_array p, - ae_int_t n, - complex_1d_array b, - ae_int_t& info, - densesolverreport& rep, - complex_1d_array& x); +
    void alglib::dfbuildercreate( + decisionforestbuilder& s, + const xparams _params = alglib::xdefault);
- +
Examples:  [1]  [2]
+
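A sketch of the optional tuning calls listed above. dfbuildersetrndvars() is documented further below; the parameter conventions of dfbuildersetsubsampleratio() and dfbuildersetseed() are assumed from their index entries only, so treat the values as placeholders.

#include "stdafx.h"
#include "dataanalysis.h"

using namespace alglib;

void configure_builder(decisionforestbuilder &builder)
{
    dfbuildersetrndvars(builder, 3);            // try 3 randomly chosen variables per split
    dfbuildersetsubsampleratio(builder, 0.66);  // roughly 66% of the dataset per tree (assumed)
    dfbuildersetseed(builder, 12345);           // fixed positive seed => reproducible forest (assumed)
}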
     
    /************************************************************************* -Dense solver. Same as RMatrixMixedSolveM(), but for complex matrices. +This function is an alias for dfbuilderpeekprogress(), left in ALGLIB for +backward compatibility reasons. -Algorithm features: -* automatic detection of degenerate cases -* condition number estimation -* iterative refinement -* O(M*N^2) complexity + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::dfbuildergetprogress( + decisionforestbuilder s, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS - A - array[0..N-1,0..N-1], system matrix - LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU result - P - array[0..N-1], pivots array, CMatrixLU result - N - size of A - B - array[0..N-1,0..M-1], right part - M - right part size +
+
Examples:  [1]  [2]
+
    /************************************************************************* +This function is used to peek into decision forest construction process +from some other thread and get current progress indicator. -OUTPUT PARAMETERS - Info - return code: - * -3 matrix is very badly conditioned or exactly singular. - * -1 N<=0 was passed - * 1 task is solved (but matrix A may be ill-conditioned, - check R1/RInf parameters for condition numbers). - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - X - array[N,M], it contains: - * info>0 => solution - * info=-3 => filled by zeros +It returns value in [0,1]. + +INPUT PARAMETERS: + S - decision forest builder object used to build forest + in some other thread + +RESULT: + progress value, in [0,1] -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixmixedsolvem( - complex_2d_array a, - complex_2d_array lua, - integer_1d_array p, - ae_int_t n, - complex_2d_array b, - ae_int_t m, - ae_int_t& info, - densesolverreport& rep, - complex_2d_array& x); +
    double alglib::dfbuilderpeekprogress( + decisionforestbuilder s, + const xparams _params = alglib::xdefault);
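A sketch of the intended usage pattern: one thread runs the (long) forest construction while a helper thread polls dfbuilderpeekprogress(). The std::thread/std::atomic plumbing is illustrative and not part of ALGLIB; the builder is assumed to already hold a dataset.

#include "stdafx.h"
#include <cstdio>
#include <atomic>
#include <chrono>
#include <thread>
#include "dataanalysis.h"

using namespace alglib;

void train_with_progress(decisionforestbuilder &builder, decisionforest &df, dfreport &rep)
{
    std::atomic<bool> done(false);
    std::thread monitor([&]() {
        while(!done)
        {
            printf("progress: %5.1f%%\n", 100.0*dfbuilderpeekprogress(builder));
            std::this_thread::sleep_for(std::chrono::milliseconds(250));
        }
    });
    dfbuilderbuildrandomforest(builder, 200, df, rep);   // long-running call
    done = true;
    monitor.join();
}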
- +
     
    /************************************************************************* -Complex dense solver for A*x=B with N*N complex matrix A and N*1 complex -vectors x and b. "Slow-but-feature-rich" version of the solver. +This subroutine adds dense dataset to the internal storage of the builder +object. Specifying your dataset in the dense format means that the dense +version of the forest construction algorithm will be invoked. -Algorithm features: -* automatic detection of degenerate cases -* condition number estimation -* iterative refinement -* O(N^3) complexity +INPUT PARAMETERS: + S - decision forest builder object + XY - array[NPoints,NVars+1] (minimum size; actual size can + be larger, only leading part is used anyway), dataset: + * first NVars elements of each row store values of the + independent variables + * last column store class number (in 0...NClasses-1) + or real value of the dependent variable + NPoints - number of rows in the dataset, NPoints>=1 + NVars - number of independent variables, NVars>=1 + NClasses - indicates type of the problem being solved: + * NClasses>=2 means that classification problem is + solved (last column of the dataset stores class + number) + * NClasses=1 means that regression problem is solved + (last column of the dataset stores variable value) -IMPORTANT: ! this function is NOT the most efficient linear solver provided - ! by ALGLIB. It estimates condition number of linear system - ! and performs iterative refinement, which results in - ! significant performance penalty when compared with "fast" - ! version which just performs LU decomposition and calls - ! triangular solver. - ! - ! This performance penalty is especially visible in the - ! multithreaded mode, because both condition number estimation - ! and iterative refinement are inherently sequential - ! calculations. - ! - ! Thus, if you need high performance and if you are pretty sure - ! that your system is well conditioned, we strongly recommend - ! you to use faster solver, CMatrixSolveFast() function. +OUTPUT PARAMETERS: + S - decision forest builder -COMMERCIAL EDITION OF ALGLIB: + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::dfbuildersetdataset( + decisionforestbuilder s, + real_2d_array xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t nclasses, + const xparams _params = alglib::xdefault); - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that LU decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +
+
Examples:  [1]  [2]
+
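For the regression case (NClasses=1) the same call looks as follows; the dataset values are made up and only illustrate the layout (one feature plus a real-valued target).

#include "stdafx.h"
#include "dataanalysis.h"

using namespace alglib;

int main()
{
    // one independent variable, real-valued target in the last column
    real_2d_array xy = "[[0.0,0.1],[1.0,0.9],[2.0,2.1],[3.0,2.9]]";

    decisionforestbuilder builder;
    dfbuildercreate(builder);
    dfbuildersetdataset(builder, xy, 4, 1, 1);   // npoints=4, nvars=1, nclasses=1 => regression

    decisionforest df;
    dfreport rep;
    dfbuilderbuildrandomforest(builder, 50, df, rep);
    return 0;
}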
    /************************************************************************* +This function tells decision forest construction algorithm to skip +variable importance estimation. -INPUT PARAMETERS - A - array[0..N-1,0..N-1], system matrix - N - size of A - B - array[0..N-1], right part +INPUT PARAMETERS: + S - decision forest builder object -OUTPUT PARAMETERS - Info - return code: - * -3 matrix is very badly conditioned or exactly singular. - * -1 N<=0 was passed - * 1 task is solved (but matrix A may be ill-conditioned, - check R1/RInf parameters for condition numbers). - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - X - array[N], it contains: - * info>0 => solution - * info=-3 => filled by zeros +OUTPUT PARAMETERS: + S - decision forest builder object. Next call to the forest + construction function will result in forest being built + without variable importance estimation. -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 29.07.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixsolve( - complex_2d_array a, - ae_int_t n, - complex_1d_array b, - ae_int_t& info, - densesolverreport& rep, - complex_1d_array& x); -void alglib::smp_cmatrixsolve( - complex_2d_array a, - ae_int_t n, - complex_1d_array b, - ae_int_t& info, - densesolverreport& rep, - complex_1d_array& x); +
    void alglib::dfbuildersetimportancenone( + decisionforestbuilder s, + const xparams _params = alglib::xdefault);
- +
     
    /************************************************************************* -Complex dense solver for A*x=B with N*N complex matrix A and N*1 complex -vectors x and b. "Fast-but-lightweight" version of the solver. +This function tells decision forest construction algorithm to use +out-of-bag version of Gini variable importance estimation (also known as +OOB-MDI). -Algorithm features: -* O(N^3) complexity -* no additional time consuming features, just triangular solver +This version of importance estimation algorithm analyzes mean decrease in +impurity (MDI) on out-of-bag sample during splits. The result is divided +by impurity at the root node in order to produce estimate in [0,1] range. -COMMERCIAL EDITION OF ALGLIB: +Such estimates are fast to calculate and resistant to overfitting issues +(thanks to the out-of-bag estimates used). However, OOB Gini rating has +following downsides: +* there exist some bias towards continuous and high-cardinality categorical + variables +* Gini rating allows us to order variables by importance, but it is hard + to define importance of the variable by itself. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that LU decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +NOTE: informally speaking, MDA (permutation importance) rating answers the + question "what part of the model predictive power is ruined by + permuting k-th variable?" while MDI tells us "what part of the model + predictive power was achieved due to usage of k-th variable". + + Thus, MDA rates each variable independently at "0 to 1" scale while + MDI (and OOB-MDI too) tends to divide "unit amount of importance" + between several important variables. + + If all variables are equally important, they will have same + MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to 1/NVars. 
+ However, roughly same picture will be produced for the "all + variables provide information no one is critical" situation and for + the "all variables are critical, drop any one, everything is ruined" + situation. + + Contrary to that, MDA will rate critical variable as ~1.0 important, + and important but non-critical variable will have less than unit + rating. + +NOTE: quite an often MDA and MDI return same results. It generally happens + on problems with low test set error (a few percents at most) and + large enough training set to avoid overfitting. + + The difference between MDA, MDI and OOB-MDI becomes important only + on "hard" tasks with high test set error and/or small training set. INPUT PARAMETERS: - A - array[0..N-1,0..N-1], system matrix - N - size of A - B - array[0..N-1], right part + S - decision forest builder object OUTPUT PARAMETERS: - Info - return code: - * -3 matrix is exactly singular (ill conditioned matrices - are not recognized). - * -1 N<=0 was passed - * 1 task is solved - B - array[N]: - * info>0 => overwritten by solution - * info=-3 => filled by zeros + S - decision forest builder object. Next call to the forest + construction function will produce: + * importance estimates in rep.varimportances field + * variable ranks in rep.topvars field -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 29.07.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixsolvefast( - complex_2d_array a, - ae_int_t n, - complex_1d_array& b, - ae_int_t& info); -void alglib::smp_cmatrixsolvefast( - complex_2d_array a, - ae_int_t n, - complex_1d_array& b, - ae_int_t& info); +
    void alglib::dfbuildersetimportanceoobgini( + decisionforestbuilder s, + const xparams _params = alglib::xdefault);
- +
     
    /************************************************************************* -Complex dense solver for A*X=B with N*N complex matrix A, N*M complex -matrices X and B. "Slow-but-feature-rich" version which provides -additional functions, at the cost of slower performance. Faster version -may be invoked with CMatrixSolveMFast() function. +This function tells decision forest construction algorithm to use +permutation variable importance estimator (also known as MDA). -Algorithm features: -* automatic detection of degenerate cases -* condition number estimation -* iterative refinement -* O(N^3+M*N^2) complexity +This version of importance estimation algorithm analyzes mean increase in +out-of-bag sum of squared residuals after random permutation of J-th +variable. The result is divided by error computed with all variables being +perturbed in order to produce R-squared-like estimate in [0,1] range. -IMPORTANT: ! this function is NOT the most efficient linear solver provided - ! by ALGLIB. It estimates condition number of linear system - ! and performs iterative refinement, which results in - ! significant performance penalty when compared with "fast" - ! version which just performs LU decomposition and calls - ! triangular solver. - ! - ! This performance penalty is especially visible in the - ! multithreaded mode, because both condition number estimation - ! and iterative refinement are inherently sequential - ! calculations. - ! - ! Thus, if you need high performance and if you are pretty sure - ! that your system is well conditioned, we strongly recommend - ! you to use faster solver, CMatrixSolveMFast() function. +Such estimate is slower to calculate than Gini-based rating because it +needs multiple inference runs for each of variables being studied. -COMMERCIAL EDITION OF ALGLIB: +ALGLIB uses parallelized and highly optimized algorithm which analyzes +path through the decision tree and allows to handle most perturbations +in O(1) time; nevertheless, requesting MDA importances may increase forest +construction time from 10% to 200% (or more, if you have thousands of +variables). - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that LU decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! 
* call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +However, MDA rating has following benefits over Gini-based ones: +* no bias towards specific variable types +* ability to directly evaluate "absolute" importance of some variable at + "0 to 1" scale (contrary to Gini-based rating, which returns comparative + importances). -INPUT PARAMETERS - A - array[0..N-1,0..N-1], system matrix - N - size of A - B - array[0..N-1,0..M-1], right part - M - right part size - RFS - iterative refinement switch: - * True - refinement is used. - Less performance, more precision. - * False - refinement is not used. - More performance, less precision. +NOTE: informally speaking, MDA (permutation importance) rating answers the + question "what part of the model predictive power is ruined by + permuting k-th variable?" while MDI tells us "what part of the model + predictive power was achieved due to usage of k-th variable". -OUTPUT PARAMETERS - Info - return code: - * -3 matrix is very badly conditioned or exactly singular. - X is filled by zeros in such cases. - * -1 N<=0 was passed - * 1 task is solved (but matrix A may be ill-conditioned, - check R1/RInf parameters for condition numbers). - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - X - array[N,M], it contains: - * info>0 => solution - * info=-3 => filled by zeros + Thus, MDA rates each variable independently at "0 to 1" scale while + MDI (and OOB-MDI too) tends to divide "unit amount of importance" + between several important variables. + + If all variables are equally important, they will have same + MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to 1/NVars. + However, roughly same picture will be produced for the "all + variables provide information no one is critical" situation and for + the "all variables are critical, drop any one, everything is ruined" + situation. + + Contrary to that, MDA will rate critical variable as ~1.0 important, + and important but non-critical variable will have less than unit + rating. + +NOTE: quite an often MDA and MDI return same results. It generally happens + on problems with low test set error (a few percents at most) and + large enough training set to avoid overfitting. + + The difference between MDA, MDI and OOB-MDI becomes important only + on "hard" tasks with high test set error and/or small training set. + +INPUT PARAMETERS: + S - decision forest builder object + +OUTPUT PARAMETERS: + S - decision forest builder object. Next call to the forest + construction function will produce: + * importance estimates in rep.varimportances field + * variable ranks in rep.topvars field -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 29.07.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixsolvem( - complex_2d_array a, - ae_int_t n, - complex_2d_array b, - ae_int_t m, - bool rfs, - ae_int_t& info, - densesolverreport& rep, - complex_2d_array& x); -void alglib::smp_cmatrixsolvem( - complex_2d_array a, - ae_int_t n, - complex_2d_array b, - ae_int_t m, - bool rfs, - ae_int_t& info, - densesolverreport& rep, - complex_2d_array& x); +
    void alglib::dfbuildersetimportancepermutation( + decisionforestbuilder s, + const xparams _params = alglib::xdefault);
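A sketch showing how the permutation (MDA) ratings can be requested and read back from the report; the builder is assumed to already hold a dataset.

#include "stdafx.h"
#include <cstdio>
#include "dataanalysis.h"

using namespace alglib;

void train_with_mda(decisionforestbuilder &builder)
{
    dfbuildersetimportancepermutation(builder);   // request MDA ratings

    decisionforest df;
    dfreport rep;
    dfbuilderbuildrandomforest(builder, 100, df, rep);

    // rep.topvars[0] is the most important variable,
    // rep.varimportances[j] is the rating of variable j in [0,1]
    printf("most important variable: %d (rating %.3f)\n",
           int(rep.topvars[0]), rep.varimportances[rep.topvars[0]]);
}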
- +
     
    /************************************************************************* -Complex dense solver for A*X=B with N*N complex matrix A, N*M complex -matrices X and B. "Fast-but-lightweight" version which provides just -triangular solver - and no additional functions like iterative refinement -or condition number estimation. +This function tells decision forest construction algorithm to use +Gini impurity based variable importance estimation (also known as MDI). -Algorithm features: -* O(N^3+M*N^2) complexity -* no additional time consuming functions +This version of importance estimation algorithm analyzes mean decrease in +impurity (MDI) on training sample during splits. The result is divided +by impurity at the root node in order to produce estimate in [0,1] range. -COMMERCIAL EDITION OF ALGLIB: +Such estimates are fast to calculate and beautifully normalized (sum to +one) but have following downsides: +* They ALWAYS sum to 1.0, even if output is completely unpredictable. I.e. + MDI allows to order variables by importance, but does not tell us about + "absolute" importances of variables +* there exist some bias towards continuous and high-cardinality categorical + variables - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that LU decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +NOTE: informally speaking, MDA (permutation importance) rating answers the + question "what part of the model predictive power is ruined by + permuting k-th variable?" while MDI tells us "what part of the model + predictive power was achieved due to usage of k-th variable". -INPUT PARAMETERS - A - array[0..N-1,0..N-1], system matrix - N - size of A - B - array[0..N-1,0..M-1], right part - M - right part size + Thus, MDA rates each variable independently at "0 to 1" scale while + MDI (and OOB-MDI too) tends to divide "unit amount of importance" + between several important variables. 
+ + If all variables are equally important, they will have same + MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to 1/NVars. + However, roughly same picture will be produced for the "all + variables provide information no one is critical" situation and for + the "all variables are critical, drop any one, everything is ruined" + situation. + + Contrary to that, MDA will rate critical variable as ~1.0 important, + and important but non-critical variable will have less than unit + rating. + +NOTE: quite an often MDA and MDI return same results. It generally happens + on problems with low test set error (a few percents at most) and + large enough training set to avoid overfitting. + + The difference between MDA, MDI and OOB-MDI becomes important only + on "hard" tasks with high test set error and/or small training set. + +INPUT PARAMETERS: + S - decision forest builder object OUTPUT PARAMETERS: - Info - return code: - * -3 matrix is exactly singular (ill conditioned matrices - are not recognized). - * -1 N<=0 was passed - * 1 task is solved - B - array[N,M]: - * info>0 => overwritten by solution - * info=-3 => filled by zeros + S - decision forest builder object. Next call to the forest + construction function will produce: + * importance estimates in rep.varimportances field + * variable ranks in rep.topvars field -- ALGLIB -- - Copyright 16.03.2015 by Bochkanov Sergey + Copyright 29.07.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixsolvemfast( - complex_2d_array a, - ae_int_t n, - complex_2d_array& b, - ae_int_t m, - ae_int_t& info); -void alglib::smp_cmatrixsolvemfast( - complex_2d_array a, - ae_int_t n, - complex_2d_array& b, - ae_int_t m, - ae_int_t& info); +
    void alglib::dfbuildersetimportancetrngini( + decisionforestbuilder s, + const xparams _params = alglib::xdefault);
- +
     
    /************************************************************************* -Dense solver for A*x=b with N*N Hermitian positive definite matrix A given -by its Cholesky decomposition, and N*1 complex vectors x and b. This is -"slow-but-feature-rich" version of the solver which estimates condition -number of the system. +This function sets random decision forest construction algorithm. -Algorithm features: -* automatic detection of degenerate cases -* O(N^2) complexity -* condition number estimation -* matrix is represented by its upper or lower triangle +As for now, only one decision forest construction algorithm is supported - +a dense "baseline" RDF algorithm. -No iterative refinement is provided because such partial representation of -matrix does not allow efficient calculation of extra-precise matrix-vector -products for large matrices. Use RMatrixSolve or RMatrixMixedSolve if you -need iterative refinement. +INPUT PARAMETERS: + S - decision forest builder object + AlgoType - algorithm type: + * 0 = baseline dense RDF -IMPORTANT: ! this function is NOT the most efficient linear solver provided - ! by ALGLIB. It estimates condition number of linear system, - ! which results in 10-15x performance penalty when compared - ! with "fast" version which just calls triangular solver. - ! - ! This performance penalty is insignificant when compared with - ! cost of large LU decomposition. However, if you call this - ! function many times for the same left side, this overhead - ! BECOMES significant. It also becomes significant for small- - ! scale problems (N<50). - ! - ! In such cases we strongly recommend you to use faster solver, - ! HPDMatrixCholeskySolveFast() function. +OUTPUT PARAMETERS: + S - decision forest builder, see -INPUT PARAMETERS - CHA - array[0..N-1,0..N-1], Cholesky decomposition, - SPDMatrixCholesky result - N - size of A - IsUpper - what half of CHA is provided - B - array[0..N-1], right part + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::dfbuildersetrdfalgo( + decisionforestbuilder s, + ae_int_t algotype, + const xparams _params = alglib::xdefault); -OUTPUT PARAMETERS - Info - return code: - * -3 A is is exactly singular or ill conditioned - X is filled by zeros in such cases. - * -1 N<=0 was passed - * 1 task is solved - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - X - array[N]: - * for info>0 - solution - * for info=-3 - filled by zeros +
+ +
+
    /************************************************************************* +This function sets split selection algorithm used by decision forest +classifier. You may choose several algorithms, with different speed and +quality of the results. + +INPUT PARAMETERS: + S - decision forest builder object + SplitStrength- split type: + * 0 = split at the random position, fastest one + * 1 = split at the middle of the range + * 2 = strong split at the best point of the range (default) + +OUTPUT PARAMETERS: + S - decision forest builder, see -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hpdmatrixcholeskysolve( - complex_2d_array cha, - ae_int_t n, - bool isupper, - complex_1d_array b, - ae_int_t& info, - densesolverreport& rep, - complex_1d_array& x); +
    void alglib::dfbuildersetrdfsplitstrength( + decisionforestbuilder s, + ae_int_t splitstrength, + const xparams _params = alglib::xdefault);
- +
Examples:  [1]  [2]
+
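A small configuration sketch combining the two settings above (algorithm type and split strength); the numeric codes follow the tables in these two entries.

#include "stdafx.h"
#include "dataanalysis.h"

using namespace alglib;

void configure_splits(decisionforestbuilder &builder)
{
    dfbuildersetrdfalgo(builder, 0);           // 0 = baseline dense RDF (the only algorithm so far)
    dfbuildersetrdfsplitstrength(builder, 0);  // 0 = split at a random position (fastest);
                                               // 2 = strong split at the best point (default)
}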
     
    /************************************************************************* -Dense solver for A*x=b with N*N Hermitian positive definite matrix A given -by its Cholesky decomposition, and N*1 complex vectors x and b. This is -"fast-but-lightweight" version of the solver. +This function sets number of variables (in [1,NVars] range) used by +decision forest construction algorithm. -Algorithm features: -* O(N^2) complexity -* matrix is represented by its upper or lower triangle -* no additional time-consuming features +The default option is to use roughly sqrt(NVars) variables. -INPUT PARAMETERS - CHA - array[0..N-1,0..N-1], Cholesky decomposition, - SPDMatrixCholesky result - N - size of A - IsUpper - what half of CHA is provided - B - array[0..N-1], right part +INPUT PARAMETERS: + S - decision forest builder object + RndVars - number of randomly selected variables; values outside + of [1,NVars] range are silently clipped. -OUTPUT PARAMETERS - Info - return code: - * -3 A is is exactly singular or ill conditioned - B is filled by zeros in such cases. - * -1 N<=0 was passed - * 1 task is solved - B - array[N]: - * for info>0 - overwritten by solution - * for info=-3 - filled by zeros +OUTPUT PARAMETERS: + S - decision forest builder -- ALGLIB -- - Copyright 18.03.2015 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hpdmatrixcholeskysolvefast( - complex_2d_array cha, - ae_int_t n, - bool isupper, - complex_1d_array& b, - ae_int_t& info); +
    void alglib::dfbuildersetrndvars( + decisionforestbuilder s, + ae_int_t rndvars, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Dense solver for A*X=B with N*N Hermitian positive definite matrix A given -by its Cholesky decomposition and N*M complex matrices X and B. This is -"slow-but-feature-rich" version of the solver which, in addition to the -solution, estimates condition number of the system. +This function tells decision forest builder to automatically choose number +of variables used by decision forest construction algorithm. Roughly +sqrt(NVars) variables will be used. -Algorithm features: -* automatic detection of degenerate cases -* O(M*N^2) complexity -* condition number estimation -* matrix is represented by its upper or lower triangle +INPUT PARAMETERS: + S - decision forest builder object -No iterative refinement is provided because such partial representation of -matrix does not allow efficient calculation of extra-precise matrix-vector -products for large matrices. Use RMatrixSolve or RMatrixMixedSolve if you -need iterative refinement. +OUTPUT PARAMETERS: + S - decision forest builder -IMPORTANT: ! this function is NOT the most efficient linear solver provided - ! by ALGLIB. It estimates condition number of linear system, - ! which results in significant performance penalty when - ! compared with "fast" version which just calls triangular - ! solver. Amount of overhead introduced depends on M (the - ! larger - the more efficient). - ! - ! This performance penalty is insignificant when compared with - ! cost of large Cholesky decomposition. However, if you call - ! this function many times for the same left side, this - ! overhead BECOMES significant. It also becomes significant - ! for small-scale problems (N<50). - ! - ! In such cases we strongly recommend you to use faster solver, - ! HPDMatrixCholeskySolveMFast() function. + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::dfbuildersetrndvarsauto( + decisionforestbuilder s, + const xparams _params = alglib::xdefault); +
    +

    Examples:   [1]  [2]  

    + +
    +
    /************************************************************************* +This function sets number of variables used by decision forest construction +algorithm as a fraction of total variable count (0,1) range. -INPUT PARAMETERS - CHA - array[N,N], Cholesky decomposition, - HPDMatrixCholesky result - N - size of CHA - IsUpper - what half of CHA is provided - B - array[N,M], right part - M - right part size +The default option is to use roughly sqrt(NVars) variables. + +INPUT PARAMETERS: + S - decision forest builder object + F - round(NVars*F) variables are selected OUTPUT PARAMETERS: - Info - return code: - * -3 A is singular, or VERY close to singular. - X is filled by zeros in such cases. - * -1 N<=0 was passed - * 1 task was solved - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - X - array[N]: - * for info>0 contains solution - * for info=-3 filled by zeros + S - decision forest builder -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hpdmatrixcholeskysolvem( - complex_2d_array cha, - ae_int_t n, - bool isupper, - complex_2d_array b, - ae_int_t m, - ae_int_t& info, - densesolverreport& rep, - complex_2d_array& x); -void alglib::smp_hpdmatrixcholeskysolvem( - complex_2d_array cha, - ae_int_t n, - bool isupper, - complex_2d_array b, - ae_int_t m, - ae_int_t& info, - densesolverreport& rep, - complex_2d_array& x); +
    void alglib::dfbuildersetrndvarsratio( + decisionforestbuilder s, + double f, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Dense solver for A*X=B with N*N Hermitian positive definite matrix A given -by its Cholesky decomposition and N*M complex matrices X and B. This is -"fast-but-lightweight" version of the solver. +This function sets seed used by internal RNG for random subsampling and +random selection of variable subsets. -Algorithm features: -* O(M*N^2) complexity -* matrix is represented by its upper or lower triangle -* no additional time-consuming features +By default random seed is used, i.e. every time you build decision forest, +we seed generator with new value obtained from system-wide RNG. Thus, +decision forest builder returns non-deterministic results. You can change +such behavior by specyfing fixed positive seed value. -INPUT PARAMETERS - CHA - array[N,N], Cholesky decomposition, - HPDMatrixCholesky result - N - size of CHA - IsUpper - what half of CHA is provided - B - array[N,M], right part - M - right part size +INPUT PARAMETERS: + S - decision forest builder object + SeedVal - seed value: + * positive values are used for seeding RNG with fixed + seed, i.e. subsequent runs on same data will return + same decision forests + * non-positive seed means that random seed is used + for every run of builder, i.e. subsequent runs on + same datasets will return slightly different + decision forests OUTPUT PARAMETERS: - Info - return code: - * -3 A is singular, or VERY close to singular. - X is filled by zeros in such cases. - * -1 N<=0 was passed - * 1 task was solved - B - array[N]: - * for info>0 overwritten by solution - * for info=-3 filled by zeros + S - decision forest builder, see -- ALGLIB -- - Copyright 18.03.2015 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hpdmatrixcholeskysolvemfast( - complex_2d_array cha, - ae_int_t n, - bool isupper, - complex_2d_array& b, - ae_int_t m, - ae_int_t& info); -void alglib::smp_hpdmatrixcholeskysolvemfast( - complex_2d_array cha, - ae_int_t n, - bool isupper, - complex_2d_array& b, - ae_int_t m, - ae_int_t& info); +
    void alglib::dfbuildersetseed( + decisionforestbuilder s, + ae_int_t seedval, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  
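The setters documented above are all applied to one decisionforestbuilder before training. Below is a minimal sketch of a reproducible configuration; the tiny 4-point dataset, the seed value 1234 and the tree count are purely illustrative, and the builder/training calls themselves are the ones used in the complete examples later in this section.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // illustrative 4-point dataset: 2 inputs, 2 classes
    real_2d_array xy = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
    decisionforestbuilder builder;
    dfbuildercreate(builder);
    dfbuildersetdataset(builder, xy, 4, 2, 2);

    dfbuildersetseed(builder, 1234);           // fixed positive seed: repeated runs on
                                               // the same data give identical forests
    dfbuildersetrdfsplitstrength(builder, 2);  // strong splits (the documented default)
    dfbuildersetrndvarsauto(builder);          // roughly sqrt(NVars) variables per split

    decisionforest forest;
    dfreport rep;
    dfbuilderbuildrandomforest(builder, 5, forest, rep);  // 5 trees, illustrative
    printf("%.4f\n", double(rep.relclserror));
    return 0;
}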

    +
     
    /************************************************************************* -Dense solver for A*x=b, with N*N Hermitian positive definite matrix A, and -N*1 complex vectors x and b. "Slow-but-feature-rich" version of the -solver. - -Algorithm features: -* automatic detection of degenerate cases -* condition number estimation -* O(N^3) complexity -* matrix is represented by its upper or lower triangle +This function sets size of dataset subsample generated the decision forest +construction algorithm. Size is specified as a fraction of total dataset +size. -No iterative refinement is provided because such partial representation of -matrix does not allow efficient calculation of extra-precise matrix-vector -products for large matrices. Use RMatrixSolve or RMatrixMixedSolve if you -need iterative refinement. +The default option is to use 50% of the dataset for training, 50% for the +OOB estimates. You can decrease fraction F down to 10%, 1% or even below +in order to reduce overfitting. -IMPORTANT: ! this function is NOT the most efficient linear solver provided - ! by ALGLIB. It estimates condition number of linear system, - ! which results in significant performance penalty when - ! compared with "fast" version which just performs Cholesky - ! decomposition and calls triangular solver. - ! - ! This performance penalty is especially visible in the - ! multithreaded mode, because both condition number estimation - ! and iterative refinement are inherently sequential - ! calculations. - ! - ! Thus, if you need high performance and if you are pretty sure - ! that your system is well conditioned, we strongly recommend - ! you to use faster solver, HPDMatrixSolveFast() function. +INPUT PARAMETERS: + S - decision forest builder object + F - fraction of the dataset to use, in (0,1] range. Values + outside of this range will be silently clipped. At + least one element is always selected for the training + set. -COMMERCIAL EDITION OF ALGLIB: +OUTPUT PARAMETERS: + S - decision forest builder - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that Cholesky decomposition is harder - ! to parallelize than, say, matrix-matrix product - this algorithm has - ! several synchronization points which can not be avoided. However, - ! parallelism starts to be profitable starting from N=500. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! 
ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::dfbuildersetsubsampleratio( + decisionforestbuilder s, + double f, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS - A - array[0..N-1,0..N-1], system matrix - N - size of A - IsUpper - what half of A is provided - B - array[0..N-1], right part +
    +

    Examples:   [1]  [2]  
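A similarly small sketch for the subsampling setter alone; the 50% ratio and the dataset are again illustrative, and only the plain training-set error from dfreport is printed because the out-of-bag report fields are not shown in this section.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array xy = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
    decisionforestbuilder builder;
    dfbuildercreate(builder);
    dfbuildersetdataset(builder, xy, 4, 2, 2);

    // each tree is trained on a random 50% subsample; the remaining
    // points are what the out-of-bag (OOB) estimates are based on
    dfbuildersetsubsampleratio(builder, 0.5);

    decisionforest forest;
    dfreport rep;
    dfbuilderbuildrandomforest(builder, 50, forest, rep);
    printf("%.4f\n", double(rep.relclserror));  // training-set classification error
    return 0;
}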

    + +
    +
    /************************************************************************* +This subroutine builds random decision forest. -OUTPUT PARAMETERS - Info - same as in RMatrixSolve - Returns -3 for non-HPD matrices. - Rep - same as in RMatrixSolve - X - same as in RMatrixSolve +--------- DEPRECATED VERSION! USE DECISION FOREST BUILDER OBJECT --------- -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 19.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hpdmatrixsolve( - complex_2d_array a, - ae_int_t n, - bool isupper, - complex_1d_array b, +
    void alglib::dfbuildrandomdecisionforest( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t nclasses, + ae_int_t ntrees, + double r, ae_int_t& info, - densesolverreport& rep, - complex_1d_array& x); -void alglib::smp_hpdmatrixsolve( - complex_2d_array a, - ae_int_t n, - bool isupper, - complex_1d_array b, + decisionforest& df, + dfreport& rep, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This subroutine builds random decision forest. + +--------- DEPRECATED VERSION! USE DECISION FOREST BUILDER OBJECT --------- + + -- ALGLIB -- + Copyright 19.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::dfbuildrandomdecisionforestx1( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t nclasses, + ae_int_t ntrees, + ae_int_t nrndvars, + double r, ae_int_t& info, - densesolverreport& rep, - complex_1d_array& x); + decisionforest& df, + dfreport& rep, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver for A*x=b, with N*N Hermitian positive definite matrix A, and -N*1 complex vectors x and b. "Fast-but-lightweight" version of the -solver without additional functions. +This function returns most probable class number for an input X. It is +same as calling dfprocess(model,x,y), then determining i=argmax(y[i]) and +returning i. -Algorithm features: -* O(N^3) complexity -* matrix is represented by its upper or lower triangle -* no additional time consuming functions +A class number in [0,NOut) range in returned for classification problems, +-1 is returned when this function is called for regression problems. -COMMERCIAL EDITION OF ALGLIB: +IMPORTANT: this function is thread-unsafe and modifies internal structures + of the model! You can not use same model object for parallel + evaluation from several threads. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that Cholesky decomposition is harder - ! to parallelize than, say, matrix-matrix product - this algorithm has - ! several synchronization points which can not be avoided. However, - ! parallelism starts to be profitable starting from N=500. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. + Use dftsprocess() with independent thread-local buffers, if + you need thread-safe evaluation. -INPUT PARAMETERS - A - array[0..N-1,0..N-1], system matrix - N - size of A - IsUpper - what half of A is provided - B - array[0..N-1], right part +INPUT PARAMETERS: + Model - decision forest model + X - input vector, array[0..NVars-1]. -OUTPUT PARAMETERS - Info - return code: - * -3 A is is exactly singular or not positive definite - X is filled by zeros in such cases. - * -1 N<=0 was passed - * 1 task was solved - B - array[0..N-1]: - * overwritten by solution - * zeros, if A is exactly singular (diagonal of its LU - decomposition has exact zeros). +RESULT: + class number, -1 for regression tasks -- ALGLIB -- - Copyright 17.03.2015 by Bochkanov Sergey + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hpdmatrixsolvefast( - complex_2d_array a, - ae_int_t n, - bool isupper, - complex_1d_array& b, - ae_int_t& info); -void alglib::smp_hpdmatrixsolvefast( - complex_2d_array a, - ae_int_t n, - bool isupper, - complex_1d_array& b, - ae_int_t& info); +
    ae_int_t alglib::dfclassify( + decisionforest model, + real_1d_array x, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Dense solver for A*X=B, with N*N Hermitian positive definite matrix A and -N*M complex matrices X and B. "Slow-but-feature-rich" version of the -solver. - -Algorithm features: -* automatic detection of degenerate cases -* condition number estimation -* O(N^3+M*N^2) complexity -* matrix is represented by its upper or lower triangle - -No iterative refinement is provided because such partial representation of -matrix does not allow efficient calculation of extra-precise matrix-vector -products for large matrices. Use RMatrixSolve or RMatrixMixedSolve if you -need iterative refinement. - -IMPORTANT: ! this function is NOT the most efficient linear solver provided - ! by ALGLIB. It estimates condition number of linear system, - ! which results in significant performance penalty when - ! compared with "fast" version which just calls triangular - ! solver. - ! - ! This performance penalty is especially apparent when you use - ! ALGLIB parallel capabilities (condition number estimation is - ! inherently sequential). It also becomes significant for - ! small-scale problems (N<100). - ! - ! In such cases we strongly recommend you to use faster solver, - ! HPDMatrixSolveMFast() function. +This function creates buffer structure which can be used to perform +parallel inference requests. -COMMERCIAL EDITION OF ALGLIB: +DF subpackage provides two sets of computing functions - ones which use +internal buffer of DF model (these functions are single-threaded because +they use same buffer, which can not shared between threads), and ones +which use external buffer. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that Cholesky decomposition is harder - ! to parallelize than, say, matrix-matrix product - this algorithm has - ! several synchronization points which can not be avoided. However, - ! parallelism starts to be profitable starting from N=500. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +This function is used to initialize external buffer. 
INPUT PARAMETERS - A - array[0..N-1,0..N-1], system matrix - N - size of A - IsUpper - what half of A is provided - B - array[0..N-1,0..M-1], right part - M - right part size + Model - DF model which is associated with newly created buffer OUTPUT PARAMETERS - Info - same as in RMatrixSolve. - Returns -3 for non-HPD matrices. - Rep - same as in RMatrixSolve - X - same as in RMatrixSolve + Buf - external buffer. + + +IMPORTANT: buffer object should be used only with model which was used to + initialize buffer. Any attempt to use buffer with different + object is dangerous - you may get integrity check failure + (exception) because sizes of internal arrays do not fit to + dimensions of the model structure. -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hpdmatrixsolvem( - complex_2d_array a, - ae_int_t n, - bool isupper, - complex_2d_array b, - ae_int_t m, - ae_int_t& info, - densesolverreport& rep, - complex_2d_array& x); -void alglib::smp_hpdmatrixsolvem( - complex_2d_array a, - ae_int_t n, - bool isupper, - complex_2d_array b, - ae_int_t m, - ae_int_t& info, - densesolverreport& rep, - complex_2d_array& x); +
    void alglib::dfcreatebuffer( + decisionforest model, + decisionforestbuffer& buf, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver for A*X=B, with N*N Hermitian positive definite matrix A and -N*M complex matrices X and B. "Fast-but-lightweight" version of the solver. +Inference using decision forest -Algorithm features: -* O(N^3+M*N^2) complexity -* matrix is represented by its upper or lower triangle -* no additional time consuming features like condition number estimation +IMPORTANT: this function is thread-unsafe and may modify internal + structures of the model! You can not use same model object for + parallel evaluation from several threads. -COMMERCIAL EDITION OF ALGLIB: + Use dftsprocess() with independent thread-local buffers if + you need thread-safe evaluation. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that Cholesky decomposition is harder - ! to parallelize than, say, matrix-matrix product - this algorithm has - ! several synchronization points which can not be avoided. However, - ! parallelism starts to be profitable starting from N=500. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +INPUT PARAMETERS: + DF - decision forest model + X - input vector, array[NVars] + Y - possibly preallocated buffer, reallocated if too small -INPUT PARAMETERS - A - array[0..N-1,0..N-1], system matrix - N - size of A - IsUpper - what half of A is provided - B - array[0..N-1,0..M-1], right part - M - right part size +OUTPUT PARAMETERS: + Y - result. Regression estimate when solving regression task, + vector of posterior probabilities for classification task. + +See also DFProcessI. -OUTPUT PARAMETERS - Info - return code: - * -3 A is is exactly singular or is not positive definite. - B is filled by zeros in such cases. - * -1 N<=0 was passed - * 1 task is solved - B - array[0..N-1]: - * overwritten by solution - * zeros, if problem was not solved -- ALGLIB -- - Copyright 17.03.2015 by Bochkanov Sergey + Copyright 16.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hpdmatrixsolvemfast( - complex_2d_array a, - ae_int_t n, - bool isupper, - complex_2d_array& b, - ae_int_t m, - ae_int_t& info); -void alglib::smp_hpdmatrixsolvemfast( - complex_2d_array a, - ae_int_t n, - bool isupper, - complex_2d_array& b, - ae_int_t m, - ae_int_t& info); +
    void alglib::dfprocess( + decisionforest df, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Dense solver. +This function returns first component of the inferred vector (i.e. one +with index #0). -This subroutine solves a system A*x=b, where A is NxN non-denegerate -real matrix given by its LU decomposition, x and b are real vectors. This -is "slow-but-robust" version of the linear LU-based solver. Faster version -is RMatrixLUSolveFast() function. +It is a convenience wrapper for dfprocess() intended for either: +* 1-dimensional regression problems +* 2-class classification problems + +In the former case this function returns inference result as scalar, which +is definitely more convenient that wrapping it as vector. In the latter +case it returns probability of object belonging to class #0. + +If you call it for anything different from two cases above, it will work +as defined, i.e. return y[0], although it is of less use in such cases. + +IMPORTANT: this function is thread-unsafe and modifies internal structures + of the model! You can not use same model object for parallel + evaluation from several threads. + + Use dftsprocess() with independent thread-local buffers, if + you need thread-safe evaluation. + +INPUT PARAMETERS: + Model - DF model + X - input vector, array[0..NVars-1]. + +RESULT: + Y[0] + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::dfprocess0( + decisionforest model, + real_1d_array x, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  

    + +
    +
    /************************************************************************* +'interactive' variant of DFProcess for languages like Python which support +constructs like "Y = DFProcessI(DF,X)" and interactive mode of interpreter + +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. + +IMPORTANT: this function is thread-unsafe and may modify internal + structures of the model! You can not use same model object for + parallel evaluation from several threads. + + Use dftsprocess() with independent thread-local buffers if + you need thread-safe evaluation. + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::dfprocessi( + decisionforest df, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Relative classification error on the test set + +INPUT PARAMETERS: + DF - decision forest model + XY - test set + NPoints - test set size + +RESULT: + percent of incorrectly classified cases. + Zero if model solves regression task. + + -- ALGLIB -- + Copyright 16.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::dfrelclserror( + decisionforest df, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +RMS error on the test set + +INPUT PARAMETERS: + DF - decision forest model + XY - test set + NPoints - test set size + +RESULT: + root mean square error. + Its meaning for regression task is obvious. As for + classification task, RMS error means error when estimating posterior + probabilities. + + -- ALGLIB -- + Copyright 16.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::dfrmserror( + decisionforest df, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
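A short sketch showing how the two error metrics above might be queried on a held-out set; the train/test matrices are made up for illustration.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // train on 4 points, evaluate on 2 held-out points (illustrative data)
    real_2d_array xytrain = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
    real_2d_array xytest  = "[[2,0,0],[-2,0,1]]";

    decisionforestbuilder builder;
    decisionforest forest;
    dfreport rep;
    dfbuildercreate(builder);
    dfbuildersetdataset(builder, xytrain, 4, 2, 2);
    dfbuilderbuildrandomforest(builder, 10, forest, rep);

    // relative classification error and RMS error on the held-out set
    // (see the descriptions of dfrelclserror/dfrmserror above)
    printf("%.4f\n", double(dfrelclserror(forest, xytest, 2)));
    printf("%.4f\n", double(dfrmserror(forest, xytest, 2)));
    return 0;
}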
    + +
    +
/************************************************************************* +This function serializes data structure to string. + +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. + But you should not insert separators into the middle of the "words" + nor should you change the case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. +*************************************************************************/ +
    void dfserialize(decisionforest &obj, std::string &s_out); +void dfserialize(decisionforest &obj, std::ostream &s_out); +
    + +
    +
/************************************************************************* +Inference using decision forest + +Thread-safe processing using external buffer for temporaries. + +This function is thread-safe (i.e. you can use same DF model from +multiple threads) as long as you use different buffer objects for different +threads. + +INPUT PARAMETERS: + DF - decision forest model + Buf - buffer object, must be allocated specifically for this + model with dfcreatebuffer(). + X - input vector, array[NVars] + Y - possibly preallocated buffer, reallocated if too small + +OUTPUT PARAMETERS: + Y - result. Regression estimate when solving regression task, + vector of posterior probabilities for classification task. + +See also DFProcessI. + + + -- ALGLIB -- + Copyright 16.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::dftsprocess( + decisionforest df, + decisionforestbuffer buf, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault); + +
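A sketch of the buffered, thread-safe evaluation path described above: the trained model is shared, while each std::thread owns a private buffer obtained from dfcreatebuffer(). The model, dataset and query points are illustrative.

#include "stdafx.h"
#include <stdio.h>
#include <thread>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // train a tiny illustrative model (same dataset as in the examples below)
    real_2d_array xy = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
    decisionforestbuilder builder;
    decisionforest forest;
    dfreport rep;
    dfbuildercreate(builder);
    dfbuildersetdataset(builder, xy, 4, 2, 2);
    dfbuilderbuildrandomforest(builder, 10, forest, rep);

    // one buffer per thread: the model is shared, the buffers are not
    auto worker = [&forest](double x0, double x1)
    {
        decisionforestbuffer buf;
        dfcreatebuffer(forest, buf);        // buffer tied to this model
        real_1d_array x, y;
        x.setlength(2);
        x[0] = x0;
        x[1] = x1;
        dftsprocess(forest, buf, x, y);     // thread-safe: uses the external buffer
        printf("p(class0)=%.3f\n", double(y[0]));
    };
    std::thread t1(worker, +1.0, 0.0);
    std::thread t2(worker, -1.0, 0.0);
    t1.join();
    t2.join();
    return 0;
}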
    + +
    +
    /************************************************************************* +This function unserializes data structure from string. +*************************************************************************/ +
    void dfunserialize(const std::string &s_in, decisionforest &obj); +void dfunserialize(const std::istream &s_in, decisionforest &obj); +
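A round-trip sketch for dfserialize()/dfunserialize(): an illustrative freshly trained model is written to a std::string and restored into a second decisionforest object, after which both objects should yield identical predictions.

#include "stdafx.h"
#include <stdio.h>
#include <string>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // train a tiny illustrative model
    real_2d_array xy = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
    decisionforestbuilder builder;
    decisionforest forest, restored;
    dfreport rep;
    dfbuildercreate(builder);
    dfbuildersetdataset(builder, xy, 4, 2, 2);
    dfbuilderbuildrandomforest(builder, 5, forest, rep);

    // round-trip: portable text representation, then a fresh model object
    std::string s;
    dfserialize(forest, s);
    dfunserialize(s, restored);

    // both objects should produce identical predictions
    real_1d_array x = "[+1,0]", y1, y2;
    dfprocess(forest, x, y1);
    dfprocess(restored, x, y2);
    printf("%.3f %.3f\n", double(y1[0]), double(y2[0]));
    return 0;
}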
    + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "dataanalysis.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // The very simple classification example: classify points (x,y) in 2D space
    +    // as ones with x>=0 and ones with x<0 (y is ignored, but our classifier
+    // has to figure this out on its own).
    +    //
    +    // First, we have to create decision forest builder object, load dataset and
    +    // specify training settings. Our dataset is specified as matrix, which has
    +    // following format:
    +    //
    +    //     x0 y0 class0
    +    //     x1 y1 class1
    +    //     x2 y2 class2
    +    //     ....
    +    //
    +    // Here xi and yi can be any values (and in fact you can have any number of
    +    // independent variables), and classi MUST be integer number in [0,NClasses)
    +    // range. In our example we denote points with x>=0 as class #0, and
    +    // ones with negative xi as class #1.
    +    //
    +    // NOTE: if you want to solve regression problem, specify NClasses=1. In
    +    //       this case last column of xy can be any numeric value.
    +    //
    +    // For the sake of simplicity, our example includes only 4-point dataset.
    +    // However, random forests are able to cope with extremely large datasets
    +    // having millions of examples.
    +    //
    +    decisionforestbuilder builder;
    +    ae_int_t nvars = 2;
    +    ae_int_t nclasses = 2;
    +    ae_int_t npoints = 4;
    +    real_2d_array xy = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
    +
    +    dfbuildercreate(builder);
    +    dfbuildersetdataset(builder, xy, npoints, nvars, nclasses);
    +
    +    // in our example we train decision forest using full sample - it allows us
    +    // to get zero classification error. However, in practical applications smaller
    +    // values are used: 50%, 25%, 5% or even less.
    +    dfbuildersetsubsampleratio(builder, 1.0);
    +
    +    // we train random forest with just one tree; again, in real life situations
    +    // you typically need from 50 to 500 trees.
    +    ae_int_t ntrees = 1;
    +    decisionforest forest;
    +    dfreport rep;
    +    dfbuilderbuildrandomforest(builder, ntrees, forest, rep);
    +
    +    // with such settings (100% of the training set is used) you can expect
    +    // zero classification error. Beautiful results, but remember - in real life
    +    // you do not need zero TRAINING SET error, you need good generalization.
    +
    +    printf("%.4f\n", double(rep.relclserror)); // EXPECTED: 0.0000
    +
    +    // now, let's perform some simple processing with dfprocess()
    +    real_1d_array x = "[+1,0]";
    +    real_1d_array y = "[]";
    +    dfprocess(forest, x, y);
    +    printf("%s\n", y.tostring(3).c_str()); // EXPECTED: [+1,0]
    +
+    // another option is to use dfprocess0() which returns just the first component
+    // of the output vector y; this is ideal for regression problems and binary classifiers.
    +    double y0;
    +    y0 = dfprocess0(forest, x);
    +    printf("%.3f\n", double(y0)); // EXPECTED: 1.000
    +
    +    // finally, you can use dfclassify() which returns most probable class index (i.e. argmax y[i]).
    +    ae_int_t i;
    +    i = dfclassify(forest, x);
    +    printf("%d\n", int(i)); // EXPECTED: 0
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "dataanalysis.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // The very simple regression example: model f(x,y)=x+y
    +    //
    +    // First, we have to create DF builder object, load dataset and specify
    +    // training settings. Our dataset is specified as matrix, which has following
    +    // format:
    +    //
    +    //     x0 y0 f0
    +    //     x1 y1 f1
    +    //     x2 y2 f2
    +    //     ....
    +    //
    +    // Here xi and yi can be any values, and fi is a dependent function value.
    +    //
    +    // NOTE: you can also solve classification problems with DF models, see
    +    //       another example for this unit.
    +    //
    +    decisionforestbuilder builder;
    +    ae_int_t nvars = 2;
    +    ae_int_t nclasses = 1;
    +    ae_int_t npoints = 4;
    +    real_2d_array xy = "[[1,1,+2],[1,-1,0],[-1,1,0],[-1,-1,-2]]";
    +
    +    dfbuildercreate(builder);
    +    dfbuildersetdataset(builder, xy, npoints, nvars, nclasses);
    +
    +    // in our example we train decision forest using full sample - it allows us
+    // to get zero error on the training set. However, in practical applications smaller
    +    // values are used: 50%, 25%, 5% or even less.
    +    dfbuildersetsubsampleratio(builder, 1.0);
    +
    +    // we train random forest with just one tree; again, in real life situations
    +    // you typically need from 50 to 500 trees.
    +    ae_int_t ntrees = 1;
    +    decisionforest model;
    +    dfreport rep;
    +    dfbuilderbuildrandomforest(builder, ntrees, model, rep);
    +
    +    // with such settings (full sample is used) you can expect zero RMS error on the
    +    // training set. Beautiful results, but remember - in real life you do not
    +    // need zero TRAINING SET error, you need good generalization.
    +
    +    printf("%.4f\n", double(rep.rmserror)); // EXPECTED: 0.0000
    +
    +    // now, let's perform some simple processing with dfprocess()
    +    real_1d_array x = "[+1,+1]";
    +    real_1d_array y = "[]";
    +    dfprocess(model, x, y);
    +    printf("%s\n", y.tostring(3).c_str()); // EXPECTED: [+2]
    +
+    // another option is to use dfprocess0() which returns just the first component
+    // of the output vector y; this is ideal for regression problems and binary classifiers.
    +    double y0;
    +    y0 = dfprocess0(model, x);
    +    printf("%.3f\n", double(y0)); // EXPECTED: 2.000
    +
+    // there also exists another convenience function, dfclassify(),
    +    // but it does not work for regression problems - it always returns -1.
    +    ae_int_t i;
    +    i = dfclassify(model, x);
    +    printf("%d\n", int(i)); // EXPECTED: -1
    +    return 0;
    +}
    +
    +
    +
    + + +
    +
    /************************************************************************* + +*************************************************************************/ +
    class densesolverlsreport +{ + double r2; + real_2d_array cx; + ae_int_t n; + ae_int_t k; +}; + +
    + +
    +
    /************************************************************************* + +*************************************************************************/ +
    class densesolverreport +{ + double r1; + double rinf; +}; + +
    + +
    +
    /************************************************************************* +Complex dense linear solver for A*x=b with complex N*N A given by its LU +decomposition and N*1 vectors x and b. This is "slow-but-robust" version +of the complex linear solver with additional features which add +significant performance overhead. Faster version is CMatrixLUSolveFast() +function. Algorithm features: * automatic detection of degenerate cases * O(N^2) complexity * condition number estimation -No iterative refinement is provided because exact form of original matrix -is not known to subroutine. Use RMatrixSolve or RMatrixMixedSolve if you +No iterative refinement is provided because exact form of original matrix +is not known to subroutine. Use CMatrixSolve or CMatrixMixedSolve if you need iterative refinement. IMPORTANT: ! this function is NOT the most efficient linear solver provided @@ -8770,13 +9030,13 @@ ! scale problems. ! ! In such cases we strongly recommend you to use faster solver, - ! RMatrixLUSolveFast() function. + ! CMatrixLUSolveFast() function. INPUT PARAMETERS - LUA - array[N,N], LU decomposition, RMatrixLU result - P - array[N], pivots array, RMatrixLU result + LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU result + P - array[0..N-1], pivots array, CMatrixLU result N - size of A - B - array[N], right part + B - array[0..N-1], right part OUTPUT PARAMETERS Info - return code: @@ -8791,37 +9051,35 @@ * info>0 => solution * info=-3 => filled by zeros - -- ALGLIB -- Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixlusolve( - real_2d_array lua, +
    void alglib::cmatrixlusolve( + complex_2d_array lua, integer_1d_array p, ae_int_t n, - real_1d_array b, + complex_1d_array b, ae_int_t& info, densesolverreport& rep, - real_1d_array& x); + complex_1d_array& x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver. - -This subroutine solves a system A*x=b, where A is NxN non-denegerate -real matrix given by its LU decomposition, x and b are real vectors. This -is "fast-without-any-checks" version of the linear LU-based solver. Slower -but more robust version is RMatrixLUSolve() function. +Complex dense linear solver for A*x=b with N*N complex A given by its LU +decomposition and N*1 vectors x and b. This is fast lightweight version +of solver, which is significantly faster than CMatrixLUSolve(), but does +not provide additional information (like condition numbers). Algorithm features: * O(N^2) complexity -* fast algorithm without ANY additional checks, just triangular solver +* no additional time-consuming features, just triangular solver INPUT PARAMETERS - LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result - P - array[0..N-1], pivots array, RMatrixLU result + LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU result + P - array[0..N-1], pivots array, CMatrixLU result N - size of A B - array[0..N-1], right part @@ -8829,34 +9087,36 @@ Info - return code: * -3 matrix is exactly singular (ill conditioned matrices are not recognized). - X is filled by zeros in such cases. * -1 N<=0 was passed * 1 task is solved B - array[N]: * info>0 => overwritten by solution * info=-3 => filled by zeros +NOTE: unlike CMatrixLUSolve(), this function does NOT check for + near-degeneracy of input matrix. It checks for EXACT degeneracy, + because this check is easy to do. However, very badly conditioned + matrices may went unnoticed. + + -- ALGLIB -- - Copyright 18.03.2015 by Bochkanov Sergey + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixlusolvefast( - real_2d_array lua, +
    void alglib::cmatrixlusolvefast( + complex_2d_array lua, integer_1d_array p, ae_int_t n, - real_1d_array& b, - ae_int_t& info); + complex_1d_array& b, + ae_int_t& info, + const xparams _params = alglib::xdefault);
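The fast solver above expects a ready-made LU factorization. A factor-once / solve-in-place sketch follows; it assumes the cmatrixlu() routine from ALGLIB's linalg unit (the "CMatrixLU result" referenced above) keeps its usual interface, and the 2x2 system is constructed so that the exact solution is x = [1, 1i].

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"
#include "solvers.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // A = [[2, i], [-i, 2]], b = [1, i]  =>  exact solution x = [1, i]
    complex_2d_array a = "[[2,1i],[-1i,2]]";
    complex_1d_array b = "[1,1i]";
    ae_int_t n = 2;

    // factor once; a is overwritten by its LU decomposition
    // (cmatrixlu() is assumed to follow the usual ALGLIB trfac interface)
    integer_1d_array pivots;
    cmatrixlu(a, n, n, pivots);

    // fast in-place solve: b is overwritten by the solution
    ae_int_t info;
    cmatrixlusolvefast(a, pivots, n, b, info);
    printf("%d\n", int(info));              // 1 on success
    printf("%s\n", b.tostring(3).c_str());  // approximately [1, 1i]
    return 0;
}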
    - +
     
    /************************************************************************* -Dense solver. - -Similar to RMatrixLUSolve() but solves task with multiple right parts -(where b and x are NxM matrices). This is "robust-but-slow" version of -LU-based solver which performs additional checks for non-degeneracy of -inputs (condition number estimation). If you need best performance, use -"fast-without-any-checks" version, RMatrixLUSolveMFast(). +Dense solver for A*X=B with N*N complex A given by its LU decomposition, +and N*M matrices X and B (multiple right sides). "Slow-but-feature-rich" +version of the solver. Algorithm features: * automatic detection of degenerate cases @@ -8864,7 +9124,7 @@ * condition number estimation No iterative refinement is provided because exact form of original matrix -is not known to subroutine. Use RMatrixSolve or RMatrixMixedSolve if you +is not known to subroutine. Use CMatrixSolve or CMatrixMixedSolve if you need iterative refinement. IMPORTANT: ! this function is NOT the most efficient linear solver provided @@ -8879,45 +9139,24 @@ ! small-scale problems. ! ! In such cases we strongly recommend you to use faster solver, - ! RMatrixLUSolveMFast() function. - -COMMERCIAL EDITION OF ALGLIB: + ! CMatrixLUSolveMFast() function. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Triangular solver is relatively easy to parallelize. - ! However, parallelization will be efficient only for large number of - ! right parts M. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. 
INPUT PARAMETERS - LUA - array[N,N], LU decomposition, RMatrixLU result - P - array[N], pivots array, RMatrixLU result + LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result + P - array[0..N-1], pivots array, RMatrixLU result N - size of A B - array[0..N-1,0..M-1], right part M - right part size @@ -8925,7 +9164,6 @@ OUTPUT PARAMETERS Info - return code: * -3 matrix is very badly conditioned or exactly singular. - X is filled by zeros in such cases. * -1 N<=0 was passed * 1 task is solved (but matrix A may be ill-conditioned, check R1/RInf parameters for condition numbers). @@ -8936,87 +9174,53 @@ * info>0 => solution * info=-3 => filled by zeros - -- ALGLIB -- Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixlusolvem( - real_2d_array lua, - integer_1d_array p, - ae_int_t n, - real_2d_array b, - ae_int_t m, - ae_int_t& info, - densesolverreport& rep, - real_2d_array& x); -void alglib::smp_rmatrixlusolvem( - real_2d_array lua, +
    void alglib::cmatrixlusolvem( + complex_2d_array lua, integer_1d_array p, ae_int_t n, - real_2d_array b, + complex_2d_array b, ae_int_t m, ae_int_t& info, densesolverreport& rep, - real_2d_array& x); + complex_2d_array& x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver. - -Similar to RMatrixLUSolve() but solves task with multiple right parts, -where b and x are NxM matrices. This is "fast-without-any-checks" version -of LU-based solver. It does not estimate condition number of a system, -so it is extremely fast. If you need better detection of near-degenerate -cases, use RMatrixLUSolveM() function. +Dense solver for A*X=B with N*N complex A given by its LU decomposition, +and N*M matrices X and B (multiple right sides). "Fast-but-lightweight" +version of the solver. Algorithm features: * O(M*N^2) complexity -* fast algorithm without ANY additional checks, just triangular solver - -COMMERCIAL EDITION OF ALGLIB: +* no additional time-consuming features - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. Triangular solver is relatively easy to parallelize. - ! However, parallelization will be efficient only for large number of - ! right parts M. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. -INPUT PARAMETERS: +INPUT PARAMETERS LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result P - array[0..N-1], pivots array, RMatrixLU result N - size of A B - array[0..N-1,0..M-1], right part M - right part size -OUTPUT PARAMETERS: +OUTPUT PARAMETERS Info - return code: * -3 matrix is exactly singular (ill conditioned matrices are not recognized). @@ -9026,33 +9230,24 @@ * info>0 => overwritten by solution * info=-3 => filled by zeros + -- ALGLIB -- - Copyright 18.03.2015 by Bochkanov Sergey + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixlusolvemfast( - real_2d_array lua, - integer_1d_array p, - ae_int_t n, - real_2d_array& b, - ae_int_t m, - ae_int_t& info); -void alglib::smp_rmatrixlusolvemfast( - real_2d_array lua, +
    void alglib::cmatrixlusolvemfast( + complex_2d_array lua, integer_1d_array p, ae_int_t n, - real_2d_array& b, + complex_2d_array& b, ae_int_t m, - ae_int_t& info); + ae_int_t& info, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver. - -This subroutine solves a system A*x=b, where BOTH ORIGINAL A AND ITS -LU DECOMPOSITION ARE KNOWN. You can use it if for some reasons you have -both A and its LU decomposition. +Dense solver. Same as RMatrixMixedSolve(), but for complex matrices. Algorithm features: * automatic detection of degenerate cases @@ -9062,8 +9257,8 @@ INPUT PARAMETERS A - array[0..N-1,0..N-1], system matrix - LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result - P - array[0..N-1], pivots array, RMatrixLU result + LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU result + P - array[0..N-1], pivots array, CMatrixLU result N - size of A B - array[0..N-1], right part @@ -9083,24 +9278,22 @@ -- ALGLIB -- Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixmixedsolve( - real_2d_array a, - real_2d_array lua, +
    void alglib::cmatrixmixedsolve( + complex_2d_array a, + complex_2d_array lua, integer_1d_array p, ae_int_t n, - real_1d_array b, + complex_1d_array b, ae_int_t& info, densesolverreport& rep, - real_1d_array& x); + complex_1d_array& x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver. - -Similar to RMatrixMixedSolve() but solves task with multiple right parts -(where b and x are NxM matrices). +Dense solver. Same as RMatrixMixedSolveM(), but for complex matrices. Algorithm features: * automatic detection of degenerate cases @@ -9110,8 +9303,8 @@ INPUT PARAMETERS A - array[0..N-1,0..N-1], system matrix - LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result - P - array[0..N-1], pivots array, RMatrixLU result + LUA - array[0..N-1,0..N-1], LU decomposition, CMatrixLU result + P - array[0..N-1], pivots array, CMatrixLU result N - size of A B - array[0..N-1,0..M-1], right part M - right part size @@ -9132,24 +9325,24 @@ -- ALGLIB -- Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixmixedsolvem( - real_2d_array a, - real_2d_array lua, +
    void alglib::cmatrixmixedsolvem( + complex_2d_array a, + complex_2d_array lua, integer_1d_array p, ae_int_t n, - real_2d_array b, + complex_2d_array b, ae_int_t m, ae_int_t& info, densesolverreport& rep, - real_2d_array& x); + complex_2d_array& x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver for A*x=b with N*N real matrix A and N*1 real vectorx x and -b. This is "slow-but-feature rich" version of the linear solver. Faster -version is RMatrixSolveFast() function. +Complex dense solver for A*x=B with N*N complex matrix A and N*1 complex +vectors x and b. "Slow-but-feature-rich" version of the solver. Algorithm features: * automatic detection of degenerate cases @@ -9167,43 +9360,20 @@ ! This performance penalty is especially visible in the ! multithreaded mode, because both condition number estimation ! and iterative refinement are inherently sequential - ! calculations. It also very significant on small matrices. + ! calculations. ! ! Thus, if you need high performance and if you are pretty sure ! that your system is well conditioned, we strongly recommend - ! you to use faster solver, RMatrixSolveFast() function. - -COMMERCIAL EDITION OF ALGLIB: + ! you to use faster solver, CMatrixSolveFast() function. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that LU decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -9230,81 +9400,45 @@ -- ALGLIB -- Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixsolve( - real_2d_array a, - ae_int_t n, - real_1d_array b, - ae_int_t& info, - densesolverreport& rep, - real_1d_array& x); -void alglib::smp_rmatrixsolve( - real_2d_array a, +
    void alglib::cmatrixsolve( + complex_2d_array a, ae_int_t n, - real_1d_array b, + complex_1d_array b, ae_int_t& info, densesolverreport& rep, - real_1d_array& x); + complex_1d_array& x, + const xparams _params = alglib::xdefault);
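A matching sketch for the feature-rich solver just documented, on the same kind of small made-up system (exact solution x = [1, 1i]); in addition to the solution it fills the densesolverreport structure shown earlier.

#include "stdafx.h"
#include <stdio.h>
#include "solvers.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // A = [[2, i], [-i, 2]], b = [1, i]  =>  exact solution x = [1, i]
    complex_2d_array a = "[[2,1i],[-1i,2]]";
    complex_1d_array b = "[1,1i]";
    complex_1d_array x;
    ae_int_t info;
    densesolverreport rep;

    // "slow-but-feature-rich" path: detects degeneracy and fills rep.r1/rep.rinf
    cmatrixsolve(a, 2, b, info, rep, x);
    printf("%d\n", int(info));              // 1 on success
    printf("%s\n", x.tostring(3).c_str());  // approximately [1, 1i]
    printf("%.3f\n", double(rep.r1));       // 1-norm condition estimate (see densesolverreport)
    return 0;
}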
    - +
     
    /************************************************************************* -Dense solver. - -This subroutine solves a system A*x=b, where A is NxN non-denegerate -real matrix, x and b are vectors. This is a "fast" version of linear -solver which does NOT provide any additional functions like condition -number estimation or iterative refinement. +Complex dense solver for A*x=B with N*N complex matrix A and N*1 complex +vectors x and b. "Fast-but-lightweight" version of the solver. Algorithm features: -* efficient algorithm O(N^3) complexity -* no performance overhead from additional functionality - -If you need condition number estimation or iterative refinement, use more -feature-rich version - RMatrixSolve(). - -COMMERCIAL EDITION OF ALGLIB: +* O(N^3) complexity +* no additional time consuming features, just triangular solver - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that LU decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. -INPUT PARAMETERS +INPUT PARAMETERS: A - array[0..N-1,0..N-1], system matrix N - size of A B - array[0..N-1], right part -OUTPUT PARAMETERS +OUTPUT PARAMETERS: Info - return code: * -3 matrix is exactly singular (ill conditioned matrices are not recognized). @@ -9315,122 +9449,28 @@ * info=-3 => filled by zeros -- ALGLIB -- - Copyright 16.03.2015 by Bochkanov Sergey + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixsolvefast( - real_2d_array a, - ae_int_t n, - real_1d_array& b, - ae_int_t& info); -void alglib::smp_rmatrixsolvefast( - real_2d_array a, +
    void alglib::cmatrixsolvefast( + complex_2d_array a, ae_int_t n, - real_1d_array& b, - ae_int_t& info); - -
    - -
    -
    /************************************************************************* -Dense solver. - -This subroutine finds solution of the linear system A*X=B with non-square, -possibly degenerate A. System is solved in the least squares sense, and -general least squares solution X = X0 + CX*y which minimizes |A*X-B| is -returned. If A is non-degenerate, solution in the usual sense is returned. - -Algorithm features: -* automatic detection (and correct handling!) of degenerate cases -* iterative refinement -* O(N^3) complexity - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! Multithreaded acceleration is only partially supported (some parts are - ! optimized, but most - are not). - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. - -INPUT PARAMETERS - A - array[0..NRows-1,0..NCols-1], system matrix - NRows - vertical size of A - NCols - horizontal size of A - B - array[0..NCols-1], right part - Threshold- a number in [0,1]. Singular values beyond Threshold are - considered zero. Set it to 0.0, if you don't understand - what it means, so the solver will choose good value on its - own. - -OUTPUT PARAMETERS - Info - return code: - * -4 SVD subroutine failed - * -1 if NRows<=0 or NCols<=0 or Threshold<0 was passed - * 1 if task is solved - Rep - solver report, see below for more info - X - array[0..N-1,0..M-1], it contains: - * solution of A*X=B (even for singular A) - * zeros, if SVD subroutine failed - -SOLVER REPORT - -Subroutine sets following fields of the Rep structure: -* R2 reciprocal of condition number: 1/cond(A), 2-norm. -* N = NCols -* K dim(Null(A)) -* CX array[0..N-1,0..K-1], kernel of A. - Columns of CX store such vectors that A*CX[i]=0. - - -- ALGLIB -- - Copyright 24.08.2009 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::rmatrixsolvels( - real_2d_array a, - ae_int_t nrows, - ae_int_t ncols, - real_1d_array b, - double threshold, - ae_int_t& info, - densesolverlsreport& rep, - real_1d_array& x); -void alglib::smp_rmatrixsolvels( - real_2d_array a, - ae_int_t nrows, - ae_int_t ncols, - real_1d_array b, - double threshold, + complex_1d_array& b, ae_int_t& info, - densesolverlsreport& rep, - real_1d_array& x); + const xparams _params = alglib::xdefault);
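CMatrixSolveFast(), declared just above, overwrites its right part instead of returning a separate X. A minimal sketch of that calling pattern (illustrative data; the "solvers.h" include is an assumption):

#include <cstdio>
#include "solvers.h"

int main()
{
    alglib::complex_2d_array a;
    a.setlength(2, 2);
    a[0][0] = 4.0;                    a[0][1] = alglib::complex(0, 1);
    a[1][0] = alglib::complex(0, -1); a[1][1] = 3.0;
    alglib::complex_1d_array b;
    b.setlength(2);
    b[0] = 1.0;
    b[1] = alglib::complex(2, -1);

    alglib::ae_int_t info;
    alglib::cmatrixsolvefast(a, 2, b, info);    // on success b holds the solution
    printf("info=%d, b=%s\n", (int)info, b.tostring(3).c_str());
    return 0;
}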
    - +
     
    /************************************************************************* -Dense solver. - -Similar to RMatrixSolve() but solves task with multiple right parts (where -b and x are NxM matrices). This is "slow-but-robust" version of linear -solver with additional functionality like condition number estimation. -There also exists faster version - RMatrixSolveMFast(). +Complex dense solver for A*X=B with N*N complex matrix A, N*M complex +matrices X and B. "Slow-but-feature-rich" version which provides +additional functions, at the cost of slower performance. Faster version +may be invoked with CMatrixSolveMFast() function. Algorithm features: * automatic detection of degenerate cases * condition number estimation -* optional iterative refinement +* iterative refinement * O(N^3+M*N^2) complexity IMPORTANT: ! this function is NOT the most efficient linear solver provided @@ -9443,43 +9483,20 @@ ! This performance penalty is especially visible in the ! multithreaded mode, because both condition number estimation ! and iterative refinement are inherently sequential - ! calculations. It also very significant on small matrices. + ! calculations. ! ! Thus, if you need high performance and if you are pretty sure ! that your system is well conditioned, we strongly recommend - ! you to use faster solver, RMatrixSolveMFast() function. - -COMMERCIAL EDITION OF ALGLIB: + ! you to use faster solver, CMatrixSolveMFast() function. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that LU decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! 
ALGLIB Reference Manual in order to find out how to use performance- @@ -9498,7 +9515,7 @@ OUTPUT PARAMETERS Info - return code: - * -3 A is ill conditioned or singular. + * -3 matrix is very badly conditioned or exactly singular. X is filled by zeros in such cases. * -1 N<=0 was passed * 1 task is solved (but matrix A may be ill-conditioned, @@ -9506,79 +9523,45 @@ Rep - additional report, following fields are set: * rep.r1 condition number in 1-norm * rep.rinf condition number in inf-norm - X - array[N], it contains: + X - array[N,M], it contains: * info>0 => solution * info=-3 => filled by zeros - -- ALGLIB -- Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixsolvem( - real_2d_array a, - ae_int_t n, - real_2d_array b, - ae_int_t m, - bool rfs, - ae_int_t& info, - densesolverreport& rep, - real_2d_array& x); -void alglib::smp_rmatrixsolvem( - real_2d_array a, +
    void alglib::cmatrixsolvem( + complex_2d_array a, ae_int_t n, - real_2d_array b, + complex_2d_array b, ae_int_t m, bool rfs, ae_int_t& info, densesolverreport& rep, - real_2d_array& x); + complex_2d_array& x, + const xparams _params = alglib::xdefault);
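A hedged sketch of solving several right parts at once with CMatrixSolveM(), with the columns of B holding the individual right-hand sides (the example data and the "solvers.h" include are assumptions):

#include <cstdio>
#include "solvers.h"

int main()
{
    alglib::complex_2d_array a;
    a.setlength(2, 2);
    a[0][0] = alglib::complex(1, 1);  a[0][1] = alglib::complex(2, 0);
    a[1][0] = alglib::complex(0, -1); a[1][1] = alglib::complex(1, 0);

    alglib::complex_2d_array b;
    b.setlength(2, 2);            // N=2 rows, M=2 right parts (one per column)
    b[0][0] = 1.0; b[0][1] = 0.0;
    b[1][0] = 0.0; b[1][1] = 1.0;

    alglib::ae_int_t info;
    alglib::densesolverreport rep;
    alglib::complex_2d_array x;
    // rfs=true requests the iterative refinement described above
    alglib::cmatrixsolvem(a, 2, b, 2, true, info, rep, x);
    if( info>0 )
        printf("X =\n%s\n", x.tostring(3).c_str());
    return 0;
}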
    - +
     
    /************************************************************************* -Dense solver. - -Similar to RMatrixSolve() but solves task with multiple right parts (where -b and x are NxM matrices). This is "fast" version of linear solver which -does NOT offer additional functions like condition number estimation or -iterative refinement. +Complex dense solver for A*X=B with N*N complex matrix A, N*M complex +matrices X and B. "Fast-but-lightweight" version which provides just +triangular solver - and no additional functions like iterative refinement +or condition number estimation. Algorithm features: * O(N^3+M*N^2) complexity -* no additional functionality, highest performance - -COMMERCIAL EDITION OF ALGLIB: +* no additional time consuming functions - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that LU decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -9589,51 +9572,36 @@ N - size of A B - array[0..N-1,0..M-1], right part M - right part size - RFS - iterative refinement switch: - * True - refinement is used. - Less performance, more precision. - * False - refinement is not used. - More performance, less precision. -OUTPUT PARAMETERS +OUTPUT PARAMETERS: Info - return code: * -3 matrix is exactly singular (ill conditioned matrices are not recognized). - X is filled by zeros in such cases. 
* -1 N<=0 was passed * 1 task is solved - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - B - array[N]: + B - array[N,M]: * info>0 => overwritten by solution * info=-3 => filled by zeros - -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 16.03.2015 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixsolvemfast( - real_2d_array a, - ae_int_t n, - real_2d_array& b, - ae_int_t m, - ae_int_t& info); -void alglib::smp_rmatrixsolvemfast( - real_2d_array a, +
    void alglib::cmatrixsolvemfast( + complex_2d_array a, ae_int_t n, - real_2d_array& b, + complex_2d_array& b, ae_int_t m, - ae_int_t& info); + ae_int_t& info, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver for A*x=b with N*N symmetric positive definite matrix A given -by its Cholesky decomposition, and N*1 real vectors x and b. This is "slow- -but-feature-rich" version of the solver which, in addition to the -solution, performs condition number estimation. +Dense solver for A*x=b with N*N Hermitian positive definite matrix A given +by its Cholesky decomposition, and N*1 complex vectors x and b. This is +"slow-but-feature-rich" version of the solver which estimates condition +number of the system. Algorithm features: * automatic detection of degenerate cases @@ -9658,14 +9626,14 @@ ! scale problems (N<50). ! ! In such cases we strongly recommend you to use faster solver, - ! SPDMatrixCholeskySolveFast() function. + ! HPDMatrixCholeskySolveFast() function. INPUT PARAMETERS - CHA - array[N,N], Cholesky decomposition, + CHA - array[0..N-1,0..N-1], Cholesky decomposition, SPDMatrixCholesky result N - size of A IsUpper - what half of CHA is provided - B - array[N], right part + B - array[0..N-1], right part OUTPUT PARAMETERS Info - return code: @@ -9683,39 +9651,40 @@ -- ALGLIB -- Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spdmatrixcholeskysolve( - real_2d_array cha, +
    void alglib::hpdmatrixcholeskysolve( + complex_2d_array cha, ae_int_t n, bool isupper, - real_1d_array b, + complex_1d_array b, ae_int_t& info, densesolverreport& rep, - real_1d_array& x); + complex_1d_array& x, + const xparams _params = alglib::xdefault);
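The intended workflow here is factor-once/solve-many. A sketch under the assumption that the Cholesky factor comes from hpdmatrixcholesky() in the linalg unit (its exact signature and bool return are assumptions; the test matrix is made up):

#include <cstdio>
#include "linalg.h"     // assumed home of hpdmatrixcholesky (trfac unit)
#include "solvers.h"

int main()
{
    // Hermitian positive definite 2x2 matrix, upper triangle used (isupper=true)
    alglib::complex_2d_array cha;
    cha.setlength(2, 2);
    cha[0][0] = 4.0;                    cha[0][1] = alglib::complex(1, 1);
    cha[1][0] = alglib::complex(1, -1); cha[1][1] = 3.0;

    // Factor once (assumed API: returns false if the matrix is not HPD)...
    if( !alglib::hpdmatrixcholesky(cha, 2, true) )
    {
        printf("matrix is not positive definite\n");
        return 1;
    }

    // ...then reuse the factor for as many right parts as needed
    alglib::complex_1d_array b;
    b.setlength(2);
    b[0] = 1.0;
    b[1] = alglib::complex(0, 2);
    alglib::ae_int_t info;
    alglib::densesolverreport rep;
    alglib::complex_1d_array x;
    alglib::hpdmatrixcholeskysolve(cha, 2, true, b, info, rep, x);
    printf("info=%d, x=%s\n", (int)info, x.tostring(3).c_str());
    return 0;
}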
    - +
     
    /************************************************************************* -Dense solver for A*x=b with N*N symmetric positive definite matrix A given -by its Cholesky decomposition, and N*1 real vectors x and b. This is "fast- -but-lightweight" version of the solver. +Dense solver for A*x=b with N*N Hermitian positive definite matrix A given +by its Cholesky decomposition, and N*1 complex vectors x and b. This is +"fast-but-lightweight" version of the solver. Algorithm features: * O(N^2) complexity * matrix is represented by its upper or lower triangle -* no additional features +* no additional time-consuming features INPUT PARAMETERS - CHA - array[N,N], Cholesky decomposition, + CHA - array[0..N-1,0..N-1], Cholesky decomposition, SPDMatrixCholesky result N - size of A IsUpper - what half of CHA is provided - B - array[N], right part + B - array[0..N-1], right part OUTPUT PARAMETERS Info - return code: * -3 A is is exactly singular or ill conditioned - X is filled by zeros in such cases. + B is filled by zeros in such cases. * -1 N<=0 was passed * 1 task is solved B - array[N]: @@ -9723,23 +9692,24 @@ * for info=-3 - filled by zeros -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 18.03.2015 by Bochkanov Sergey *************************************************************************/ -
/************************************************************************* -Dense solver for A*x=b with N*N symmetric positive definite matrix A given -by its Cholesky decomposition, and N*1 real vectors x and b. This is "fast- -but-lightweight" version of the solver. +Dense solver for A*x=b with N*N Hermitian positive definite matrix A given +by its Cholesky decomposition, and N*1 complex vectors x and b. This is +"fast-but-lightweight" version of the solver. Algorithm features: * O(N^2) complexity * matrix is represented by its upper or lower triangle -* no additional features +* no additional time-consuming features INPUT PARAMETERS - CHA - array[N,N], Cholesky decomposition, + CHA - array[0..N-1,0..N-1], Cholesky decomposition, SPDMatrixCholesky result N - size of A IsUpper - what half of CHA is provided - B - array[N], right part + B - array[0..N-1], right part OUTPUT PARAMETERS Info - return code: * -3 A is exactly singular or ill conditioned - X is filled by zeros in such cases. + B is filled by zeros in such cases. * -1 N<=0 was passed * 1 task is solved B - array[N]: @@ -9723,23 +9692,24 @@ * for info=-3 - filled by zeros -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey + Copyright 18.03.2015 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spdmatrixcholeskysolvefast( - real_2d_array cha, +
    void alglib::hpdmatrixcholeskysolvefast( + complex_2d_array cha, ae_int_t n, bool isupper, - real_1d_array& b, - ae_int_t& info); + complex_1d_array& b, + ae_int_t& info, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver for A*X=B with N*N symmetric positive definite matrix A given -by its Cholesky decomposition, and N*M vectors X and B. It is "slow-but- -feature-rich" version of the solver which estimates condition number of -the system. +Dense solver for A*X=B with N*N Hermitian positive definite matrix A given +by its Cholesky decomposition and N*M complex matrices X and B. This is +"slow-but-feature-rich" version of the solver which, in addition to the +solution, estimates condition number of the system. Algorithm features: * automatic detection of degenerate cases @@ -9760,25 +9730,26 @@ ! larger - the more efficient). ! ! This performance penalty is insignificant when compared with - ! cost of large LU decomposition. However, if you call this - ! function many times for the same left side, this overhead - ! BECOMES significant. It also becomes significant for small- - ! scale problems (N<50). + ! cost of large Cholesky decomposition. However, if you call + ! this function many times for the same left side, this + ! overhead BECOMES significant. It also becomes significant + ! for small-scale problems (N<50). ! ! In such cases we strongly recommend you to use faster solver, - ! SPDMatrixCholeskySolveMFast() function. + ! HPDMatrixCholeskySolveMFast() function. + INPUT PARAMETERS - CHA - array[0..N-1,0..N-1], Cholesky decomposition, - SPDMatrixCholesky result + CHA - array[N,N], Cholesky decomposition, + HPDMatrixCholesky result N - size of CHA IsUpper - what half of CHA is provided - B - array[0..N-1,0..M-1], right part + B - array[N,M], right part M - right part size -OUTPUT PARAMETERS +OUTPUT PARAMETERS: Info - return code: - * -3 A is is exactly singular or badly conditioned + * -3 A is singular, or VERY close to singular. X is filled by zeros in such cases. * -1 N<=0 was passed * 1 task was solved @@ -9792,50 +9763,41 @@ -- ALGLIB -- Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spdmatrixcholeskysolvem( - real_2d_array cha, - ae_int_t n, - bool isupper, - real_2d_array b, - ae_int_t m, - ae_int_t& info, - densesolverreport& rep, - real_2d_array& x); -void alglib::smp_spdmatrixcholeskysolvem( - real_2d_array cha, +
    void alglib::hpdmatrixcholeskysolvem( + complex_2d_array cha, ae_int_t n, bool isupper, - real_2d_array b, + complex_2d_array b, ae_int_t m, ae_int_t& info, densesolverreport& rep, - real_2d_array& x); + complex_2d_array& x, + const xparams _params = alglib::xdefault);
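The same cached factor can serve a whole block of right parts. A sketch (again assuming hpdmatrixcholesky() from the linalg unit; the data is illustrative) where B is the identity, so X ends up holding the columns of inv(A):

#include <cstdio>
#include "linalg.h"     // assumed home of hpdmatrixcholesky
#include "solvers.h"

int main()
{
    alglib::complex_2d_array cha;
    cha.setlength(2, 2);
    cha[0][0] = 5.0;                    cha[0][1] = alglib::complex(2, 1);
    cha[1][0] = alglib::complex(2, -1); cha[1][1] = 4.0;
    if( !alglib::hpdmatrixcholesky(cha, 2, true) )   // assumed bool return
        return 1;

    alglib::complex_2d_array b;
    b.setlength(2, 2);            // B = identity, so X = inv(A) column by column
    b[0][0] = 1.0; b[0][1] = 0.0;
    b[1][0] = 0.0; b[1][1] = 1.0;

    alglib::ae_int_t info;
    alglib::densesolverreport rep;
    alglib::complex_2d_array x;
    alglib::hpdmatrixcholeskysolvem(cha, 2, true, b, 2, info, rep, x);
    printf("info=%d\n%s\n", (int)info, x.tostring(3).c_str());
    return 0;
}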
    - +
     
    /************************************************************************* -Dense solver for A*X=B with N*N symmetric positive definite matrix A given -by its Cholesky decomposition, and N*M vectors X and B. It is "fast-but- -lightweight" version of the solver which just solves linear system, -without any additional functions. +Dense solver for A*X=B with N*N Hermitian positive definite matrix A given +by its Cholesky decomposition and N*M complex matrices X and B. This is +"fast-but-lightweight" version of the solver. Algorithm features: * O(M*N^2) complexity * matrix is represented by its upper or lower triangle -* no additional functionality +* no additional time-consuming features INPUT PARAMETERS CHA - array[N,N], Cholesky decomposition, - SPDMatrixCholesky result + HPDMatrixCholesky result N - size of CHA IsUpper - what half of CHA is provided B - array[N,M], right part M - right part size -OUTPUT PARAMETERS +OUTPUT PARAMETERS: Info - return code: - * -3 A is is exactly singular or badly conditioned + * -3 A is singular, or VERY close to singular. X is filled by zeros in such cases. * -1 N<=0 was passed * 1 task was solved @@ -9846,27 +9808,21 @@ -- ALGLIB -- Copyright 18.03.2015 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spdmatrixcholeskysolvemfast( - real_2d_array cha, - ae_int_t n, - bool isupper, - real_2d_array& b, - ae_int_t m, - ae_int_t& info); -void alglib::smp_spdmatrixcholeskysolvemfast( - real_2d_array cha, +
    void alglib::hpdmatrixcholeskysolvemfast( + complex_2d_array cha, ae_int_t n, bool isupper, - real_2d_array& b, + complex_2d_array& b, ae_int_t m, - ae_int_t& info); + ae_int_t& info, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense linear solver for A*x=b with N*N real symmetric positive definite -matrix A, N*1 vectors x and b. "Slow-but-feature-rich" version of the +Dense solver for A*x=b, with N*N Hermitian positive definite matrix A, and +N*1 complex vectors x and b. "Slow-but-feature-rich" version of the solver. Algorithm features: @@ -9893,38 +9849,16 @@ ! ! Thus, if you need high performance and if you are pretty sure ! that your system is well conditioned, we strongly recommend - ! you to use faster solver, SPDMatrixSolveFast() function. - -COMMERCIAL EDITION OF ALGLIB: + ! you to use faster solver, HPDMatrixSolveFast() function. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that Cholesky decomposition is harder - ! to parallelize than, say, matrix-matrix product - this algorithm has - ! several synchronization points which can not be avoided. However, - ! parallelism starts to be profitable starting from N=500. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -9937,81 +9871,45 @@ B - array[0..N-1], right part OUTPUT PARAMETERS - Info - return code: - * -3 matrix is very badly conditioned or non-SPD. - * -1 N<=0 was passed - * 1 task is solved (but matrix A may be ill-conditioned, - check R1/RInf parameters for condition numbers). - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - X - array[N], it contains: - * info>0 => solution - * info=-3 => filled by zeros + Info - same as in RMatrixSolve + Returns -3 for non-HPD matrices. + Rep - same as in RMatrixSolve + X - same as in RMatrixSolve -- ALGLIB -- Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spdmatrixsolve( - real_2d_array a, - ae_int_t n, - bool isupper, - real_1d_array b, - ae_int_t& info, - densesolverreport& rep, - real_1d_array& x); -void alglib::smp_spdmatrixsolve( - real_2d_array a, +
    void alglib::hpdmatrixsolve( + complex_2d_array a, ae_int_t n, bool isupper, - real_1d_array b, + complex_1d_array b, ae_int_t& info, densesolverreport& rep, - real_1d_array& x); + complex_1d_array& x, + const xparams _params = alglib::xdefault);
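A brief sketch of HPDMatrixSolve() showing that only the triangle selected by IsUpper needs to hold meaningful values (illustrative data; the "solvers.h" include is assumed):

#include <cstdio>
#include "solvers.h"

int main()
{
    alglib::complex_2d_array a;
    a.setlength(2, 2);
    a[0][0] = 3.0; a[0][1] = alglib::complex(1, 2);
    a[1][0] = 0.0; a[1][1] = 6.0;   // strictly lower part is ignored for isupper=true

    alglib::complex_1d_array b;
    b.setlength(2);
    b[0] = alglib::complex(1, 1);
    b[1] = 2.0;

    alglib::ae_int_t info;
    alglib::densesolverreport rep;
    alglib::complex_1d_array x;
    alglib::hpdmatrixsolve(a, 2, true, b, info, rep, x);
    if( info==-3 )
        printf("matrix is not HPD or is badly conditioned\n");
    else
        printf("x=%s (r1=%.2e)\n", x.tostring(3).c_str(), rep.r1);
    return 0;
}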
    - +
     
    /************************************************************************* -Dense linear solver for A*x=b with N*N real symmetric positive definite -matrix A, N*1 vectors x and b. "Fast-but-lightweight" version of the -solver. +Dense solver for A*x=b, with N*N Hermitian positive definite matrix A, and +N*1 complex vectors x and b. "Fast-but-lightweight" version of the +solver without additional functions. Algorithm features: * O(N^3) complexity * matrix is represented by its upper or lower triangle -* no additional time consuming features like condition number estimation - -COMMERCIAL EDITION OF ALGLIB: +* no additional time consuming functions - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that Cholesky decomposition is harder - ! to parallelize than, say, matrix-matrix product - this algorithm has - ! several synchronization points which can not be avoided. However, - ! parallelism starts to be profitable starting from N=500. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -10025,35 +9923,33 @@ OUTPUT PARAMETERS Info - return code: - * -3 A is is exactly singular or non-SPD + * -3 A is is exactly singular or not positive definite + X is filled by zeros in such cases. * -1 N<=0 was passed * 1 task was solved - B - array[N], it contains: - * info>0 => solution - * info=-3 => filled by zeros + B - array[0..N-1]: + * overwritten by solution + * zeros, if A is exactly singular (diagonal of its LU + decomposition has exact zeros). -- ALGLIB -- Copyright 17.03.2015 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spdmatrixsolvefast( - real_2d_array a, - ae_int_t n, - bool isupper, - real_1d_array& b, - ae_int_t& info); -void alglib::smp_spdmatrixsolvefast( - real_2d_array a, +
    void alglib::hpdmatrixsolvefast( + complex_2d_array a, ae_int_t n, bool isupper, - real_1d_array& b, - ae_int_t& info); + complex_1d_array& b, + ae_int_t& info, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver for A*X=B with N*N symmetric positive definite matrix A, and -N*M vectors X and B. It is "slow-but-feature-rich" version of the solver. +Dense solver for A*X=B, with N*N Hermitian positive definite matrix A and +N*M complex matrices X and B. "Slow-but-feature-rich" version of the +solver. Algorithm features: * automatic detection of degenerate cases @@ -10068,49 +9964,26 @@ IMPORTANT: ! this function is NOT the most efficient linear solver provided ! by ALGLIB. It estimates condition number of linear system, - ! which results in significant performance penalty when - ! compared with "fast" version which just performs Cholesky - ! decomposition and calls triangular solver. + ! which results in significant performance penalty when + ! compared with "fast" version which just calls triangular + ! solver. ! - ! This performance penalty is especially visible in the - ! multithreaded mode, because both condition number estimation - ! and iterative refinement are inherently sequential - ! calculations. + ! This performance penalty is especially apparent when you use + ! ALGLIB parallel capabilities (condition number estimation is + ! inherently sequential). It also becomes significant for + ! small-scale problems (N<100). ! - ! Thus, if you need high performance and if you are pretty sure - ! that your system is well conditioned, we strongly recommend - ! you to use faster solver, SPDMatrixSolveMFast() function. - -COMMERCIAL EDITION OF ALGLIB: + ! In such cases we strongly recommend you to use faster solver, + ! HPDMatrixSolveMFast() function. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that Cholesky decomposition is harder - ! to parallelize than, say, matrix-matrix product - this algorithm has - ! several synchronization points which can not be avoided. However, - ! parallelism starts to be profitable starting from N=500. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! 
ALGLIB Reference Manual in order to find out how to use performance- @@ -10124,82 +9997,45 @@ M - right part size OUTPUT PARAMETERS - Info - return code: - * -3 matrix is very badly conditioned or non-SPD. - * -1 N<=0 was passed - * 1 task is solved (but matrix A may be ill-conditioned, - check R1/RInf parameters for condition numbers). - Rep - additional report, following fields are set: - * rep.r1 condition number in 1-norm - * rep.rinf condition number in inf-norm - X - array[N,M], it contains: - * info>0 => solution - * info=-3 => filled by zeros + Info - same as in RMatrixSolve. + Returns -3 for non-HPD matrices. + Rep - same as in RMatrixSolve + X - same as in RMatrixSolve -- ALGLIB -- Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spdmatrixsolvem( - real_2d_array a, +
    void alglib::hpdmatrixsolvem( + complex_2d_array a, ae_int_t n, bool isupper, - real_2d_array b, + complex_2d_array b, ae_int_t m, ae_int_t& info, densesolverreport& rep, - real_2d_array& x); -void alglib::smp_spdmatrixsolvem( - real_2d_array a, - ae_int_t n, - bool isupper, - real_2d_array b, - ae_int_t m, - ae_int_t& info, - densesolverreport& rep, - real_2d_array& x); + complex_2d_array& x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Dense solver for A*X=B with N*N symmetric positive definite matrix A, and -N*M vectors X and B. It is "fast-but-lightweight" version of the solver. +Dense solver for A*X=B, with N*N Hermitian positive definite matrix A and +N*M complex matrices X and B. "Fast-but-lightweight" version of the solver. Algorithm features: * O(N^3+M*N^2) complexity * matrix is represented by its upper or lower triangle -* no additional time consuming features - -COMMERCIAL EDITION OF ALGLIB: +* no additional time consuming features like condition number estimation - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that Cholesky decomposition is harder - ! to parallelize than, say, matrix-matrix product - this algorithm has - ! several synchronization points which can not be avoided. However, - ! parallelism starts to be profitable starting from N=500. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -10214,3818 +10050,4691 @@ OUTPUT PARAMETERS Info - return code: - * -3 A is is exactly singular + * -3 A is is exactly singular or is not positive definite. + B is filled by zeros in such cases. * -1 N<=0 was passed - * 1 task was solved - B - array[N,M], it contains: - * info>0 => solution - * info=-3 => filled by zeros + * 1 task is solved + B - array[0..N-1]: + * overwritten by solution + * zeros, if problem was not solved -- ALGLIB -- Copyright 17.03.2015 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spdmatrixsolvemfast( - real_2d_array a, - ae_int_t n, - bool isupper, - real_2d_array& b, - ae_int_t m, - ae_int_t& info); -void alglib::smp_spdmatrixsolvemfast( - real_2d_array a, +
    void alglib::hpdmatrixsolvemfast( + complex_2d_array a, ae_int_t n, bool isupper, - real_2d_array& b, + complex_2d_array& b, ae_int_t m, - ae_int_t& info); + ae_int_t& info, + const xparams _params = alglib::xdefault);
    - - - +
     
    /************************************************************************* +Dense solver. -*************************************************************************/ -
    class decisionforest -{ -}; +This subroutine solves a system A*x=b, where A is NxN non-denegerate +real matrix given by its LU decomposition, x and b are real vectors. This +is "slow-but-robust" version of the linear LU-based solver. Faster version +is RMatrixLUSolveFast() function. -
    - -
    -
    /************************************************************************* +Algorithm features: +* automatic detection of degenerate cases +* O(N^2) complexity +* condition number estimation -*************************************************************************/ -
    class dfreport -{ - double relclserror; - double avgce; - double rmserror; - double avgerror; - double avgrelerror; - double oobrelclserror; - double oobavgce; - double oobrmserror; - double oobavgerror; - double oobavgrelerror; -}; +No iterative refinement is provided because exact form of original matrix +is not known to subroutine. Use RMatrixSolve or RMatrixMixedSolve if you +need iterative refinement. -
    - -
    -
    /************************************************************************* -Average cross-entropy (in bits per element) on the test set +IMPORTANT: ! this function is NOT the most efficient linear solver provided + ! by ALGLIB. It estimates condition number of linear system, + ! which results in 10-15x performance penalty when compared + ! with "fast" version which just calls triangular solver. + ! + ! This performance penalty is insignificant when compared with + ! cost of large LU decomposition. However, if you call this + ! function many times for the same left side, this overhead + ! BECOMES significant. It also becomes significant for small- + ! scale problems. + ! + ! In such cases we strongly recommend you to use faster solver, + ! RMatrixLUSolveFast() function. -INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size +INPUT PARAMETERS + LUA - array[N,N], LU decomposition, RMatrixLU result + P - array[N], pivots array, RMatrixLU result + N - size of A + B - array[N], right part + +OUTPUT PARAMETERS + Info - return code: + * -3 matrix is very badly conditioned or exactly singular. + * -1 N<=0 was passed + * 1 task is solved (but matrix A may be ill-conditioned, + check R1/RInf parameters for condition numbers). + Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + X - array[N], it contains: + * info>0 => solution + * info=-3 => filled by zeros -RESULT: - CrossEntropy/(NPoints*LN(2)). - Zero if model solves regression task. -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    double alglib::dfavgce( - decisionforest df, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::rmatrixlusolve( + real_2d_array lua, + integer_1d_array p, + ae_int_t n, + real_1d_array b, + ae_int_t& info, + densesolverreport& rep, + real_1d_array& x, + const xparams _params = alglib::xdefault);
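A sketch of the factor-once/solve-many pattern this function is meant for, assuming the LU factors come from rmatrixlu() in the linalg unit (its exact signature is an assumption; the 2x2 system is made up):

#include <cstdio>
#include "linalg.h"     // assumed home of rmatrixlu (trfac unit)
#include "solvers.h"

int main()
{
    // Factor A once with RMatrixLU, then solve against the cached LU factors
    alglib::real_2d_array lua = "[[2,1],[4,5]]";   // overwritten by LU in place
    alglib::integer_1d_array p;
    alglib::rmatrixlu(lua, 2, 2, p);               // assumed signature (A, M, N, pivots)

    alglib::real_1d_array b = "[3,9]";             // solution of this system is [1,1]
    alglib::ae_int_t info;
    alglib::densesolverreport rep;
    alglib::real_1d_array x;
    alglib::rmatrixlusolve(lua, p, 2, b, info, rep, x);
    printf("info=%d x=%s r1=%.2e\n", (int)info, x.tostring(4).c_str(), rep.r1);
    return 0;
}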
    - +
     
    /************************************************************************* -Average error on the test set +Dense solver. -INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size +This subroutine solves a system A*x=b, where A is NxN non-denegerate +real matrix given by its LU decomposition, x and b are real vectors. This +is "fast-without-any-checks" version of the linear LU-based solver. Slower +but more robust version is RMatrixLUSolve() function. -RESULT: - Its meaning for regression task is obvious. As for - classification task, it means average error when estimating posterior - probabilities. +Algorithm features: +* O(N^2) complexity +* fast algorithm without ANY additional checks, just triangular solver + +INPUT PARAMETERS + LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result + P - array[0..N-1], pivots array, RMatrixLU result + N - size of A + B - array[0..N-1], right part + +OUTPUT PARAMETERS + Info - return code: + * -3 matrix is exactly singular (ill conditioned matrices + are not recognized). + X is filled by zeros in such cases. + * -1 N<=0 was passed + * 1 task is solved + B - array[N]: + * info>0 => overwritten by solution + * info=-3 => filled by zeros -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 18.03.2015 by Bochkanov Sergey *************************************************************************/ -
    double alglib::dfavgerror( - decisionforest df, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::rmatrixlusolvefast( + real_2d_array lua, + integer_1d_array p, + ae_int_t n, + real_1d_array& b, + ae_int_t& info, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Average relative error on the test set +Dense solver. -INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size +Similar to RMatrixLUSolve() but solves task with multiple right parts +(where b and x are NxM matrices). This is "robust-but-slow" version of +LU-based solver which performs additional checks for non-degeneracy of +inputs (condition number estimation). If you need best performance, use +"fast-without-any-checks" version, RMatrixLUSolveMFast(). -RESULT: - Its meaning for regression task is obvious. As for - classification task, it means average relative error when estimating - posterior probability of belonging to the correct class. +Algorithm features: +* automatic detection of degenerate cases +* O(M*N^2) complexity +* condition number estimation - -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::dfavgrelerror( - decisionforest df, - real_2d_array xy, - ae_int_t npoints); +No iterative refinement is provided because exact form of original matrix +is not known to subroutine. Use RMatrixSolve or RMatrixMixedSolve if you +need iterative refinement. -
    - -
    -
    /************************************************************************* -This subroutine builds random decision forest. +IMPORTANT: ! this function is NOT the most efficient linear solver provided + ! by ALGLIB. It estimates condition number of linear system, + ! which results in significant performance penalty when + ! compared with "fast" version which just calls triangular + ! solver. + ! + ! This performance penalty is especially apparent when you use + ! ALGLIB parallel capabilities (condition number estimation is + ! inherently sequential). It also becomes significant for + ! small-scale problems. + ! + ! In such cases we strongly recommend you to use faster solver, + ! RMatrixLUSolveMFast() function. -INPUT PARAMETERS: - XY - training set - NPoints - training set size, NPoints>=1 - NVars - number of independent variables, NVars>=1 - NClasses - task type: - * NClasses=1 - regression task with one - dependent variable - * NClasses>1 - classification task with - NClasses classes. - NTrees - number of trees in a forest, NTrees>=1. - recommended values: 50-100. - R - percent of a training set used to build - individual trees. 0<R<=1. - recommended values: 0.1 <= R <= 0.66. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS + LUA - array[N,N], LU decomposition, RMatrixLU result + P - array[N], pivots array, RMatrixLU result + N - size of A + B - array[0..N-1,0..M-1], right part + M - right part size + +OUTPUT PARAMETERS + Info - return code: + * -3 matrix is very badly conditioned or exactly singular. + X is filled by zeros in such cases. + * -1 N<=0 was passed + * 1 task is solved (but matrix A may be ill-conditioned, + check R1/RInf parameters for condition numbers). + Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + X - array[N,M], it contains: + * info>0 => solution + * info=-3 => filled by zeros -OUTPUT PARAMETERS: - Info - return code: - * -2, if there is a point with class number - outside of [0..NClasses-1]. - * -1, if incorrect parameters was passed - (NPoints<1, NVars<1, NClasses<1, NTrees<1, R<=0 - or R>1). - * 1, if task has been solved - DF - model built - Rep - training report, contains error on a training set - and out-of-bag estimates of generalization error. -- ALGLIB -- - Copyright 19.02.2009 by Bochkanov Sergey + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::dfbuildrandomdecisionforest( - real_2d_array xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, - ae_int_t ntrees, - double r, +
    void alglib::rmatrixlusolvem( + real_2d_array lua, + integer_1d_array p, + ae_int_t n, + real_2d_array b, + ae_int_t m, ae_int_t& info, - decisionforest& df, - dfreport& rep); + densesolverreport& rep, + real_2d_array& x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine builds random decision forest. -This function gives ability to tune number of variables used when choosing -best split. +Dense solver. + +Similar to RMatrixLUSolve() but solves task with multiple right parts, +where b and x are NxM matrices. This is "fast-without-any-checks" version +of LU-based solver. It does not estimate condition number of a system, +so it is extremely fast. If you need better detection of near-degenerate +cases, use RMatrixLUSolveM() function. + +Algorithm features: +* O(M*N^2) complexity +* fast algorithm without ANY additional checks, just triangular solver + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - XY - training set - NPoints - training set size, NPoints>=1 - NVars - number of independent variables, NVars>=1 - NClasses - task type: - * NClasses=1 - regression task with one - dependent variable - * NClasses>1 - classification task with - NClasses classes. - NTrees - number of trees in a forest, NTrees>=1. - recommended values: 50-100. - NRndVars - number of variables used when choosing best split - R - percent of a training set used to build - individual trees. 0<R<=1. - recommended values: 0.1 <= R <= 0.66. + LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result + P - array[0..N-1], pivots array, RMatrixLU result + N - size of A + B - array[0..N-1,0..M-1], right part + M - right part size OUTPUT PARAMETERS: - Info - return code: - * -2, if there is a point with class number - outside of [0..NClasses-1]. - * -1, if incorrect parameters was passed - (NPoints<1, NVars<1, NClasses<1, NTrees<1, R<=0 - or R>1). - * 1, if task has been solved - DF - model built - Rep - training report, contains error on a training set - and out-of-bag estimates of generalization error. + Info - return code: + * -3 matrix is exactly singular (ill conditioned matrices + are not recognized). + * -1 N<=0 was passed + * 1 task is solved + B - array[N,M]: + * info>0 => overwritten by solution + * info=-3 => filled by zeros -- ALGLIB -- - Copyright 19.02.2009 by Bochkanov Sergey + Copyright 18.03.2015 by Bochkanov Sergey *************************************************************************/ -
    void alglib::dfbuildrandomdecisionforestx1( - real_2d_array xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, - ae_int_t ntrees, - ae_int_t nrndvars, - double r, +
    void alglib::rmatrixlusolvemfast( + real_2d_array lua, + integer_1d_array p, + ae_int_t n, + real_2d_array& b, + ae_int_t m, ae_int_t& info, - decisionforest& df, - dfreport& rep); + const xparams _params = alglib::xdefault);
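A sketch of the fast multi-right-part variant; B is overwritten in place, and with B set to the identity the result is the columns of inv(A) (rmatrixlu() from the linalg unit and the test data are assumptions):

#include <cstdio>
#include "linalg.h"     // assumed home of rmatrixlu
#include "solvers.h"

int main()
{
    alglib::real_2d_array lua = "[[3,1],[1,2]]";
    alglib::integer_1d_array p;
    alglib::rmatrixlu(lua, 2, 2, p);               // assumed signature

    alglib::real_2d_array b = "[[1,0],[0,1]]";     // identity: result is inv(A)
    alglib::ae_int_t info;
    alglib::rmatrixlusolvemfast(lua, p, 2, b, 2, info);
    printf("info=%d\n%s\n", (int)info, b.tostring(4).c_str());
    return 0;
}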
    - +
     
    /************************************************************************* -Procesing +Dense solver. -INPUT PARAMETERS: - DF - decision forest model - X - input vector, array[0..NVars-1]. +This subroutine solves a system A*x=b, where BOTH ORIGINAL A AND ITS +LU DECOMPOSITION ARE KNOWN. You can use it if for some reasons you have +both A and its LU decomposition. -OUTPUT PARAMETERS: - Y - result. Regression estimate when solving regression task, - vector of posterior probabilities for classification task. +Algorithm features: +* automatic detection of degenerate cases +* condition number estimation +* iterative refinement +* O(N^2) complexity -See also DFProcessI. +INPUT PARAMETERS + A - array[0..N-1,0..N-1], system matrix + LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result + P - array[0..N-1], pivots array, RMatrixLU result + N - size of A + B - array[0..N-1], right part + +OUTPUT PARAMETERS + Info - return code: + * -3 matrix is very badly conditioned or exactly singular. + * -1 N<=0 was passed + * 1 task is solved (but matrix A may be ill-conditioned, + check R1/RInf parameters for condition numbers). + Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + X - array[N], it contains: + * info>0 => solution + * info=-3 => filled by zeros -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::dfprocess( - decisionforest df, - real_1d_array x, - real_1d_array& y); +
    void alglib::rmatrixmixedsolve( + real_2d_array a, + real_2d_array lua, + integer_1d_array p, + ae_int_t n, + real_1d_array b, + ae_int_t& info, + densesolverreport& rep, + real_1d_array& x, + const xparams _params = alglib::xdefault);
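Because RMatrixLU() factors its argument in place, the mixed solver needs a separate copy of the original matrix. A sketch of that calling pattern (the rmatrixlu() signature and the 2x2 data are assumptions):

#include <cstdio>
#include "linalg.h"     // assumed home of rmatrixlu
#include "solvers.h"

int main()
{
    // Keep the original matrix: RMatrixLU factors its argument in place,
    // while this solver needs both A and its LU decomposition
    alglib::real_2d_array a   = "[[1,2],[3,4]]";
    alglib::real_2d_array lua = "[[1,2],[3,4]]";
    alglib::integer_1d_array p;
    alglib::rmatrixlu(lua, 2, 2, p);               // assumed signature

    alglib::real_1d_array b = "[5,6]";
    alglib::ae_int_t info;
    alglib::densesolverreport rep;
    alglib::real_1d_array x;
    alglib::rmatrixmixedsolve(a, lua, p, 2, b, info, rep, x);
    printf("info=%d x=%s\n", (int)info, x.tostring(4).c_str());
    return 0;
}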
    - +
     
    /************************************************************************* -'interactive' variant of DFProcess for languages like Python which support -constructs like "Y = DFProcessI(DF,X)" and interactive mode of interpreter - -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +Dense solver. - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::dfprocessi( - decisionforest df, - real_1d_array x, - real_1d_array& y); +Similar to RMatrixMixedSolve() but solves task with multiple right parts +(where b and x are NxM matrices). -
    - -
    -
    /************************************************************************* -Relative classification error on the test set +Algorithm features: +* automatic detection of degenerate cases +* condition number estimation +* iterative refinement +* O(M*N^2) complexity -INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size +INPUT PARAMETERS + A - array[0..N-1,0..N-1], system matrix + LUA - array[0..N-1,0..N-1], LU decomposition, RMatrixLU result + P - array[0..N-1], pivots array, RMatrixLU result + N - size of A + B - array[0..N-1,0..M-1], right part + M - right part size -RESULT: - percent of incorrectly classified cases. - Zero if model solves regression task. +OUTPUT PARAMETERS + Info - return code: + * -3 matrix is very badly conditioned or exactly singular. + * -1 N<=0 was passed + * 1 task is solved (but matrix A may be ill-conditioned, + check R1/RInf parameters for condition numbers). + Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + X - array[N,M], it contains: + * info>0 => solution + * info=-3 => filled by zeros -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    double alglib::dfrelclserror( - decisionforest df, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::rmatrixmixedsolvem( + real_2d_array a, + real_2d_array lua, + integer_1d_array p, + ae_int_t n, + real_2d_array b, + ae_int_t m, + ae_int_t& info, + densesolverreport& rep, + real_2d_array& x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -RMS error on the test set - -INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size +Dense solver for A*x=b with N*N real matrix A and N*1 real vectorx x and +b. This is "slow-but-feature rich" version of the linear solver. Faster +version is RMatrixSolveFast() function. -RESULT: - root mean square error. - Its meaning for regression task is obvious. As for - classification task, RMS error means error when estimating posterior - probabilities. +Algorithm features: +* automatic detection of degenerate cases +* condition number estimation +* iterative refinement +* O(N^3) complexity - -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::dfrmserror( - decisionforest df, - real_2d_array xy, - ae_int_t npoints); +IMPORTANT: ! this function is NOT the most efficient linear solver provided + ! by ALGLIB. It estimates condition number of linear system + ! and performs iterative refinement, which results in + ! significant performance penalty when compared with "fast" + ! version which just performs LU decomposition and calls + ! triangular solver. + ! + ! This performance penalty is especially visible in the + ! multithreaded mode, because both condition number estimation + ! and iterative refinement are inherently sequential + ! calculations. It is also very significant on small matrices. + ! + ! Thus, if you need high performance and if you are pretty sure + ! that your system is well conditioned, we strongly recommend + ! you to use faster solver, RMatrixSolveFast() function. -
    - -
    -
    /************************************************************************* -This function serializes data structure to string. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -Important properties of s_out: -* it contains alphanumeric characters, dots, underscores, minus signs -* these symbols are grouped into words, which are separated by spaces - and Windows-style (CR+LF) newlines -* although serializer uses spaces and CR+LF as separators, you can - replace any separator character by arbitrary combination of spaces, - tabs, Windows or Unix newlines. It allows flexible reformatting of - the string in case you want to include it into text or XML file. - But you should not insert separators into the middle of the "words" - nor you should change case of letters. -* s_out can be freely moved between 32-bit and 64-bit systems, little - and big endian machines, and so on. You can serialize structure on - 32-bit machine and unserialize it on 64-bit one (or vice versa), or - serialize it on SPARC and unserialize on x86. You can also - serialize it in C++ version of ALGLIB and unserialize in C# one, - and vice versa. -*************************************************************************/ -
    void dfserialize(decisionforest &obj, std::string &s_out); -
    - -
    -
    /************************************************************************* -This function unserializes data structure from string. +INPUT PARAMETERS + A - array[0..N-1,0..N-1], system matrix + N - size of A + B - array[0..N-1], right part + +OUTPUT PARAMETERS + Info - return code: + * -3 matrix is very badly conditioned or exactly singular. + * -1 N<=0 was passed + * 1 task is solved (but matrix A may be ill-conditioned, + check R1/RInf parameters for condition numbers). + Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + X - array[N], it contains: + * info>0 => solution + * info=-3 => filled by zeros + + -- ALGLIB -- + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    void dfunserialize(std::string &s_in, decisionforest &obj); +
    void alglib::rmatrixsolve( + real_2d_array a, + ae_int_t n, + real_1d_array b, + ae_int_t& info, + densesolverreport& rep, + real_1d_array& x, + const xparams _params = alglib::xdefault); +
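A minimal sketch of the feature-rich dense solver (illustrative 2x2 system and values, not taken from the manual); rep.r1/rep.rinf return the estimated condition numbers.

#include <cstdio>
#include "solvers.h"

using namespace alglib;

int main()
{
    // Solve a small well-conditioned system A*x=b with condition
    // number estimation and iterative refinement.
    real_2d_array a = "[[3,1],[1,2]]";
    real_1d_array b = "[9,8]";
    ae_int_t info;
    densesolverreport rep;
    real_1d_array x;
    rmatrixsolve(a, 2, b, info, rep, x);
    printf("%d\n", int(info));             // expected: 1
    printf("%s\n", x.tostring(4).c_str()); // expected solution: [2,3]
    return 0;
}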
    - - - +
     
/************************************************************************* -Complete elliptic integral of the second kind - -Approximates the integral +Dense solver. +This subroutine solves a system A*x=b, where A is NxN non-degenerate +real matrix, x and b are vectors. This is a "fast" version of linear +solver which does NOT provide any additional functions like condition +number estimation or iterative refinement. - pi/2 - - - | | 2 -E(m) = | sqrt( 1 - m sin t ) dt - | | - - - 0 +Algorithm features: +* efficient algorithm O(N^3) complexity +* no performance overhead from additional functionality -using the approximation +If you need condition number estimation or iterative refinement, use more +feature-rich version - RMatrixSolve(). - P(x) - x log x Q(x). + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -ACCURACY: +INPUT PARAMETERS + A - array[0..N-1,0..N-1], system matrix + N - size of A + B - array[0..N-1], right part - Relative error: -arithmetic domain # trials peak rms - IEEE 0, 1 10000 2.1e-16 7.3e-17 +OUTPUT PARAMETERS + Info - return code: + * -3 matrix is exactly singular (ill conditioned matrices + are not recognized). + * -1 N<=0 was passed + * 1 task is solved + B - array[N]: + * info>0 => overwritten by solution + * info=-3 => filled by zeros -Cephes Math Library, Release 2.8: June, 2000 -Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier + -- ALGLIB -- + Copyright 16.03.2015 by Bochkanov Sergey *************************************************************************/ -
    double alglib::ellipticintegrale(double m); +
    void alglib::rmatrixsolvefast( + real_2d_array a, + ae_int_t n, + real_1d_array& b, + ae_int_t& info, + const xparams _params = alglib::xdefault);
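The same illustrative system solved with the lightweight variant: B is overwritten with the solution and no report structure is produced.

#include <cstdio>
#include "solvers.h"

using namespace alglib;

int main()
{
    // Fast path: LU factorization plus triangular solve only.
    real_2d_array a = "[[3,1],[1,2]]";
    real_1d_array b = "[9,8]";             // overwritten by the solution
    ae_int_t info;
    rmatrixsolvefast(a, 2, b, info);
    printf("%s\n", b.tostring(4).c_str()); // expected solution: [2,3]
    return 0;
}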
    - +
     
    /************************************************************************* -Complete elliptic integral of the first kind - -Approximates the integral +Dense solver. +This subroutine finds solution of the linear system A*X=B with non-square, +possibly degenerate A. System is solved in the least squares sense, and +general least squares solution X = X0 + CX*y which minimizes |A*X-B| is +returned. If A is non-degenerate, solution in the usual sense is returned. +Algorithm features: +* automatic detection (and correct handling!) of degenerate cases +* iterative refinement +* O(N^3) complexity - pi/2 - - - | | - | dt -K(m) = | ------------------ - | 2 - | | sqrt( 1 - m sin t ) - - - 0 + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -using the approximation +INPUT PARAMETERS + A - array[0..NRows-1,0..NCols-1], system matrix + NRows - vertical size of A + NCols - horizontal size of A + B - array[0..NCols-1], right part + Threshold- a number in [0,1]. Singular values beyond Threshold are + considered zero. Set it to 0.0, if you don't understand + what it means, so the solver will choose good value on its + own. - P(x) - log x Q(x). +OUTPUT PARAMETERS + Info - return code: + * -4 SVD subroutine failed + * -1 if NRows<=0 or NCols<=0 or Threshold<0 was passed + * 1 if task is solved + Rep - solver report, see below for more info + X - array[0..N-1,0..M-1], it contains: + * solution of A*X=B (even for singular A) + * zeros, if SVD subroutine failed -ACCURACY: +SOLVER REPORT - Relative error: -arithmetic domain # trials peak rms - IEEE 0,1 30000 2.5e-16 6.8e-17 +Subroutine sets following fields of the Rep structure: +* R2 reciprocal of condition number: 1/cond(A), 2-norm. +* N = NCols +* K dim(Null(A)) +* CX array[0..N-1,0..K-1], kernel of A. + Columns of CX store such vectors that A*CX[i]=0. -Cephes Math Library, Release 2.8: June, 2000 -Copyright 1984, 1987, 2000 by Stephen L. Moshier + -- ALGLIB -- + Copyright 24.08.2009 by Bochkanov Sergey *************************************************************************/ -
    double alglib::ellipticintegralk(double m); +
    void alglib::rmatrixsolvels( + real_2d_array a, + ae_int_t nrows, + ae_int_t ncols, + real_1d_array b, + double threshold, + ae_int_t& info, + densesolverlsreport& rep, + real_1d_array& x, + const xparams _params = alglib::xdefault);
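An illustrative least squares sketch (not taken from the manual): an overdetermined 3x2 system is solved with Threshold=0.0, so the solver chooses its own cutoff for singular values.

#include <cstdio>
#include "solvers.h"

using namespace alglib;

int main()
{
    // Fit y = c0 + c1*t to three points (t=1,2,3; y=6,0,0) in the
    // least squares sense; A has a column of ones and a column of t's.
    real_2d_array a = "[[1,1],[1,2],[1,3]]";
    real_1d_array b = "[6,0,0]";
    ae_int_t info;
    densesolverlsreport rep;
    real_1d_array x;
    rmatrixsolvels(a, 3, 2, b, 0.0, info, rep, x);
    printf("%s\n", x.tostring(4).c_str()); // expected LS solution: [8,-3]
    return 0;
}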
    - +
     
    /************************************************************************* -Complete elliptic integral of the first kind - -Approximates the integral - - +Dense solver. - pi/2 - - - | | - | dt -K(m) = | ------------------ - | 2 - | | sqrt( 1 - m sin t ) - - - 0 +Similar to RMatrixSolve() but solves task with multiple right parts (where +b and x are NxM matrices). This is "slow-but-robust" version of linear +solver with additional functionality like condition number estimation. +There also exists faster version - RMatrixSolveMFast(). -where m = 1 - m1, using the approximation +Algorithm features: +* automatic detection of degenerate cases +* condition number estimation +* optional iterative refinement +* O(N^3+M*N^2) complexity - P(x) - log x Q(x). +IMPORTANT: ! this function is NOT the most efficient linear solver provided + ! by ALGLIB. It estimates condition number of linear system + ! and performs iterative refinement, which results in + ! significant performance penalty when compared with "fast" + ! version which just performs LU decomposition and calls + ! triangular solver. + ! + ! This performance penalty is especially visible in the + ! multithreaded mode, because both condition number estimation + ! and iterative refinement are inherently sequential + ! calculations. It also very significant on small matrices. + ! + ! Thus, if you need high performance and if you are pretty sure + ! that your system is well conditioned, we strongly recommend + ! you to use faster solver, RMatrixSolveMFast() function. -The argument m1 is used rather than m so that the logarithmic -singularity at m = 1 will be shifted to the origin; this -preserves maximum accuracy. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -K(0) = pi/2. +INPUT PARAMETERS + A - array[0..N-1,0..N-1], system matrix + N - size of A + B - array[0..N-1,0..M-1], right part + M - right part size + RFS - iterative refinement switch: + * True - refinement is used. + Less performance, more precision. + * False - refinement is not used. + More performance, less precision. -ACCURACY: +OUTPUT PARAMETERS + Info - return code: + * -3 A is ill conditioned or singular. + X is filled by zeros in such cases. + * -1 N<=0 was passed + * 1 task is solved (but matrix A may be ill-conditioned, + check R1/RInf parameters for condition numbers). + Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + X - array[N], it contains: + * info>0 => solution + * info=-3 => filled by zeros - Relative error: -arithmetic domain # trials peak rms - IEEE 0,1 30000 2.5e-16 6.8e-17 -Cephes Math Library, Release 2.8: June, 2000 -Copyright 1984, 1987, 2000 by Stephen L. Moshier + -- ALGLIB -- + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    double alglib::ellipticintegralkhighprecision(double m1); +
    void alglib::rmatrixsolvem( + real_2d_array a, + ae_int_t n, + real_2d_array b, + ae_int_t m, + bool rfs, + ae_int_t& info, + densesolverreport& rep, + real_2d_array& x, + const xparams _params = alglib::xdefault);
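An illustrative sketch with two right parts stored as columns of B and iterative refinement switched on (RFS=true); values are made up for the example.

#include <cstdio>
#include "solvers.h"

using namespace alglib;

int main()
{
    // Two right parts stored as columns of B; X gets one solution per column.
    real_2d_array a = "[[2,0],[0,4]]";
    real_2d_array b = "[[2,4],[4,8]]";
    ae_int_t info;
    densesolverreport rep;
    real_2d_array x;
    rmatrixsolvem(a, 2, b, 2, true, info, rep, x);
    printf("%s\n", x.tostring(4).c_str()); // expected solution: [[1,2],[1,2]]
    return 0;
}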
    - +
     
    /************************************************************************* -Incomplete elliptic integral of the second kind +Dense solver. -Approximates the integral +Similar to RMatrixSolve() but solves task with multiple right parts (where +b and x are NxM matrices). This is "fast" version of linear solver which +does NOT offer additional functions like condition number estimation or +iterative refinement. +Algorithm features: +* O(N^3+M*N^2) complexity +* no additional functionality, highest performance - phi - - - | | - | 2 -E(phi_\m) = | sqrt( 1 - m sin t ) dt - | - | | - - - 0 + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -of amplitude phi and modulus m, using the arithmetic - -geometric mean algorithm. +INPUT PARAMETERS + A - array[0..N-1,0..N-1], system matrix + N - size of A + B - array[0..N-1,0..M-1], right part + M - right part size + RFS - iterative refinement switch: + * True - refinement is used. + Less performance, more precision. + * False - refinement is not used. + More performance, less precision. -ACCURACY: +OUTPUT PARAMETERS + Info - return code: + * -3 matrix is exactly singular (ill conditioned matrices + are not recognized). + X is filled by zeros in such cases. + * -1 N<=0 was passed + * 1 task is solved + Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + B - array[N]: + * info>0 => overwritten by solution + * info=-3 => filled by zeros -Tested at random arguments with phi in [-10, 10] and m in -[0, 1]. - Relative error: -arithmetic domain # trials peak rms - IEEE -10,10 150000 3.3e-15 1.4e-16 -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1993, 2000 by Stephen L. Moshier + -- ALGLIB -- + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    double alglib::incompleteellipticintegrale(double phi, double m); +
    void alglib::rmatrixsolvemfast( + real_2d_array a, + ae_int_t n, + real_2d_array& b, + ae_int_t m, + ae_int_t& info, + const xparams _params = alglib::xdefault);
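Note that the declaration takes neither an RFS switch nor a Rep structure, even though the parameter list above mentions them (apparently carried over from the RMatrixSolveM() description). An illustrative sketch of the fast multi-right-part solve, reusing the system from the previous example:

#include <cstdio>
#include "solvers.h"

using namespace alglib;

int main()
{
    // Fast multi-right-part solve: B is overwritten column by column.
    real_2d_array a = "[[2,0],[0,4]]";
    real_2d_array b = "[[2,4],[4,8]]";     // overwritten by the solutions
    ae_int_t info;
    rmatrixsolvemfast(a, 2, b, 2, info);
    printf("%s\n", b.tostring(4).c_str()); // expected solution: [[1,2],[1,2]]
    return 0;
}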
    - +
     
    /************************************************************************* -Incomplete elliptic integral of the first kind F(phi|m) - -Approximates the integral +Dense solver for A*x=b with N*N symmetric positive definite matrix A given +by its Cholesky decomposition, and N*1 real vectors x and b. This is "slow- +but-feature-rich" version of the solver which, in addition to the +solution, performs condition number estimation. +Algorithm features: +* automatic detection of degenerate cases +* O(N^2) complexity +* condition number estimation +* matrix is represented by its upper or lower triangle +No iterative refinement is provided because such partial representation of +matrix does not allow efficient calculation of extra-precise matrix-vector +products for large matrices. Use RMatrixSolve or RMatrixMixedSolve if you +need iterative refinement. - phi - - - | | - | dt -F(phi_\m) = | ------------------ - | 2 - | | sqrt( 1 - m sin t ) - - - 0 +IMPORTANT: ! this function is NOT the most efficient linear solver provided + ! by ALGLIB. It estimates condition number of linear system, + ! which results in 10-15x performance penalty when compared + ! with "fast" version which just calls triangular solver. + ! + ! This performance penalty is insignificant when compared with + ! cost of large LU decomposition. However, if you call this + ! function many times for the same left side, this overhead + ! BECOMES significant. It also becomes significant for small- + ! scale problems (N<50). + ! + ! In such cases we strongly recommend you to use faster solver, + ! SPDMatrixCholeskySolveFast() function. -of amplitude phi and modulus m, using the arithmetic - -geometric mean algorithm. +INPUT PARAMETERS + CHA - array[N,N], Cholesky decomposition, + SPDMatrixCholesky result + N - size of A + IsUpper - what half of CHA is provided + B - array[N], right part +OUTPUT PARAMETERS + Info - return code: + * -3 A is is exactly singular or ill conditioned + X is filled by zeros in such cases. + * -1 N<=0 was passed + * 1 task is solved + Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + X - array[N]: + * for info>0 - solution + * for info=-3 - filled by zeros + -- ALGLIB -- + Copyright 27.01.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spdmatrixcholeskysolve( + real_2d_array cha, + ae_int_t n, + bool isupper, + real_1d_array b, + ae_int_t& info, + densesolverreport& rep, + real_1d_array& x, + const xparams _params = alglib::xdefault); +
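An illustrative sketch (not taken from the manual) that factors an SPD matrix with spdmatrixcholesky(), documented in the matrix factorization section of the same manual, and reuses the factor for a solve.

#include <cstdio>
#include "linalg.h"
#include "solvers.h"

using namespace alglib;

int main()
{
    // Cholesky-factor the upper triangle of an SPD matrix, then solve.
    ae_int_t n = 2;
    bool isupper = true;
    real_2d_array cha = "[[4,2],[2,3]]";   // overwritten by its Cholesky factor
    if( !spdmatrixcholesky(cha, n, isupper) )
        return 1;                          // matrix is not positive definite
    real_1d_array b = "[8,8]";
    ae_int_t info;
    densesolverreport rep;
    real_1d_array x;
    spdmatrixcholeskysolve(cha, n, isupper, b, info, rep, x);
    printf("%s\n", x.tostring(4).c_str()); // expected solution: [1,2]
    return 0;
}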
    + +
    +
    /************************************************************************* +Dense solver for A*x=b with N*N symmetric positive definite matrix A given +by its Cholesky decomposition, and N*1 real vectors x and b. This is "fast- +but-lightweight" version of the solver. -ACCURACY: +Algorithm features: +* O(N^2) complexity +* matrix is represented by its upper or lower triangle +* no additional features -Tested at random points with m in [0, 1] and phi as indicated. +INPUT PARAMETERS + CHA - array[N,N], Cholesky decomposition, + SPDMatrixCholesky result + N - size of A + IsUpper - what half of CHA is provided + B - array[N], right part - Relative error: -arithmetic domain # trials peak rms - IEEE -10,10 200000 7.4e-16 1.0e-16 +OUTPUT PARAMETERS + Info - return code: + * -3 A is is exactly singular or ill conditioned + X is filled by zeros in such cases. + * -1 N<=0 was passed + * 1 task is solved + B - array[N]: + * for info>0 - overwritten by solution + * for info=-3 - filled by zeros -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 2000 by Stephen L. Moshier + -- ALGLIB -- + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    double alglib::incompleteellipticintegralk(double phi, double m); +
    void alglib::spdmatrixcholeskysolvefast( + real_2d_array cha, + ae_int_t n, + bool isupper, + real_1d_array& b, + ae_int_t& info, + const xparams _params = alglib::xdefault);
    - - - +
     
    /************************************************************************* -Finding the eigenvalues and eigenvectors of a Hermitian matrix - -The algorithm finds eigen pairs of a Hermitian matrix by reducing it to -real tridiagonal form and using the QL/QR algorithm. +Dense solver for A*X=B with N*N symmetric positive definite matrix A given +by its Cholesky decomposition, and N*M vectors X and B. It is "slow-but- +feature-rich" version of the solver which estimates condition number of +the system. -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! Multithreaded acceleration is NOT supported for this function. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +Algorithm features: +* automatic detection of degenerate cases +* O(M*N^2) complexity +* condition number estimation +* matrix is represented by its upper or lower triangle -Input parameters: - A - Hermitian matrix which is given by its upper or lower - triangular part. - Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - IsUpper - storage format. - ZNeeded - flag controlling whether the eigenvectors are needed or - not. If ZNeeded is equal to: - * 0, the eigenvectors are not returned; - * 1, the eigenvectors are returned. +No iterative refinement is provided because such partial representation of +matrix does not allow efficient calculation of extra-precise matrix-vector +products for large matrices. Use RMatrixSolve or RMatrixMixedSolve if you +need iterative refinement. -Output parameters: - D - eigenvalues in ascending order. - Array whose index ranges within [0..N-1]. - Z - if ZNeeded is equal to: - * 0, Z hasn’t changed; - * 1, Z contains the eigenvectors. - Array whose indexes range within [0..N-1, 0..N-1]. - The eigenvectors are stored in the matrix columns. +IMPORTANT: ! this function is NOT the most efficient linear solver provided + ! by ALGLIB. It estimates condition number of linear system, + ! which results in significant performance penalty when + ! compared with "fast" version which just calls triangular + ! solver. Amount of overhead introduced depends on M (the + ! larger - the more efficient). + ! + ! This performance penalty is insignificant when compared with + ! cost of large LU decomposition. However, if you call this + ! function many times for the same left side, this overhead + ! BECOMES significant. It also becomes significant for small- + ! scale problems (N<50). + ! + ! In such cases we strongly recommend you to use faster solver, + ! SPDMatrixCholeskySolveMFast() function. -Result: - True, if the algorithm has converged. - False, if the algorithm hasn't converged (rare case). 
+INPUT PARAMETERS + CHA - array[0..N-1,0..N-1], Cholesky decomposition, + SPDMatrixCholesky result + N - size of CHA + IsUpper - what half of CHA is provided + B - array[0..N-1,0..M-1], right part + M - right part size -Note: - eigenvectors of Hermitian matrix are defined up to multiplication by - a complex number L, such that |L|=1. +OUTPUT PARAMETERS + Info - return code: + * -3 A is exactly singular or badly conditioned + X is filled by zeros in such cases. + * -1 N<=0 was passed + * 1 task was solved + Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + X - array[N,M]: + * for info>0 contains solution + * for info=-3 filled by zeros -- ALGLIB -- - Copyright 2005, 23 March 2007 by Bochkanov Sergey + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::hmatrixevd( - complex_2d_array a, +
    void alglib::spdmatrixcholeskysolvem( + real_2d_array cha, ae_int_t n, - ae_int_t zneeded, bool isupper, - real_1d_array& d, - complex_2d_array& z); + real_2d_array b, + ae_int_t m, + ae_int_t& info, + densesolverreport& rep, + real_2d_array& x, + const xparams _params = alglib::xdefault);
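The same illustrative Cholesky factor reused for two right parts at once (values chosen for the example):

#include <cstdio>
#include "linalg.h"
#include "solvers.h"

using namespace alglib;

int main()
{
    // One Cholesky factorization, two right-hand sides solved together.
    ae_int_t n = 2, m = 2;
    bool isupper = true;
    real_2d_array cha = "[[4,2],[2,3]]";   // overwritten by its Cholesky factor
    if( !spdmatrixcholesky(cha, n, isupper) )
        return 1;                          // matrix is not positive definite
    real_2d_array b = "[[8,4],[8,2]]";     // columns are independent right parts
    ae_int_t info;
    densesolverreport rep;
    real_2d_array x;
    spdmatrixcholeskysolvem(cha, n, isupper, b, m, info, rep, x);
    printf("%s\n", x.tostring(4).c_str()); // expected solution: [[1,1],[2,0]]
    return 0;
}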
    - +
     
    /************************************************************************* -Subroutine for finding the eigenvalues and eigenvectors of a Hermitian -matrix with given indexes by using bisection and inverse iteration methods - -Input parameters: - A - Hermitian matrix which is given by its upper or lower - triangular part. - Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - ZNeeded - flag controlling whether the eigenvectors are needed or - not. If ZNeeded is equal to: - * 0, the eigenvectors are not returned; - * 1, the eigenvectors are returned. - IsUpperA - storage format of matrix A. - I1, I2 - index interval for searching (from I1 to I2). - 0 <= I1 <= I2 <= N-1. - -Output parameters: - W - array of the eigenvalues found. - Array whose index ranges within [0..I2-I1]. - Z - if ZNeeded is equal to: - * 0, Z hasn’t changed; - * 1, Z contains eigenvectors. - Array whose indexes range within [0..N-1, 0..I2-I1]. - In that case, the eigenvectors are stored in the matrix - columns. +Dense solver for A*X=B with N*N symmetric positive definite matrix A given +by its Cholesky decomposition, and N*M vectors X and B. It is "fast-but- +lightweight" version of the solver which just solves linear system, +without any additional functions. -Result: - True, if successful. W contains the eigenvalues, Z contains the - eigenvectors (if needed). +Algorithm features: +* O(M*N^2) complexity +* matrix is represented by its upper or lower triangle +* no additional functionality - False, if the bisection method subroutine wasn't able to find the - eigenvalues in the given interval or if the inverse iteration - subroutine wasn't able to find all the corresponding eigenvectors. - In that case, the eigenvalues and eigenvectors are not returned. +INPUT PARAMETERS + CHA - array[N,N], Cholesky decomposition, + SPDMatrixCholesky result + N - size of CHA + IsUpper - what half of CHA is provided + B - array[N,M], right part + M - right part size -Note: - eigen vectors of Hermitian matrix are defined up to multiplication by - a complex number L, such as |L|=1. +OUTPUT PARAMETERS + Info - return code: + * -3 A is is exactly singular or badly conditioned + X is filled by zeros in such cases. + * -1 N<=0 was passed + * 1 task was solved + B - array[N]: + * for info>0 overwritten by solution + * for info=-3 filled by zeros -- ALGLIB -- - Copyright 07.01.2006, 24.03.2007 by Bochkanov Sergey. + Copyright 18.03.2015 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::hmatrixevdi( - complex_2d_array a, +
    void alglib::spdmatrixcholeskysolvemfast( + real_2d_array cha, ae_int_t n, - ae_int_t zneeded, bool isupper, - ae_int_t i1, - ae_int_t i2, - real_1d_array& w, - complex_2d_array& z); + real_2d_array& b, + ae_int_t m, + ae_int_t& info, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Subroutine for finding the eigenvalues (and eigenvectors) of a Hermitian -matrix in a given half-interval (A, B] by using a bisection and inverse -iteration +Dense linear solver for A*x=b with N*N real symmetric positive definite +matrix A, N*1 vectors x and b. "Slow-but-feature-rich" version of the +solver. -Input parameters: - A - Hermitian matrix which is given by its upper or lower - triangular part. Array whose indexes range within - [0..N-1, 0..N-1]. - N - size of matrix A. - ZNeeded - flag controlling whether the eigenvectors are needed or - not. If ZNeeded is equal to: - * 0, the eigenvectors are not returned; - * 1, the eigenvectors are returned. - IsUpperA - storage format of matrix A. - B1, B2 - half-interval (B1, B2] to search eigenvalues in. +Algorithm features: +* automatic detection of degenerate cases +* condition number estimation +* O(N^3) complexity +* matrix is represented by its upper or lower triangle -Output parameters: - M - number of eigenvalues found in a given half-interval, M>=0 - W - array of the eigenvalues found. - Array whose index ranges within [0..M-1]. - Z - if ZNeeded is equal to: - * 0, Z hasn’t changed; - * 1, Z contains eigenvectors. - Array whose indexes range within [0..N-1, 0..M-1]. - The eigenvectors are stored in the matrix columns. +No iterative refinement is provided because such partial representation of +matrix does not allow efficient calculation of extra-precise matrix-vector +products for large matrices. Use RMatrixSolve or RMatrixMixedSolve if you +need iterative refinement. -Result: - True, if successful. M contains the number of eigenvalues in the given - half-interval (could be equal to 0), W contains the eigenvalues, - Z contains the eigenvectors (if needed). +IMPORTANT: ! this function is NOT the most efficient linear solver provided + ! by ALGLIB. It estimates condition number of linear system, + ! which results in significant performance penalty when + ! compared with "fast" version which just performs Cholesky + ! decomposition and calls triangular solver. + ! + ! This performance penalty is especially visible in the + ! multithreaded mode, because both condition number estimation + ! and iterative refinement are inherently sequential + ! calculations. + ! + ! Thus, if you need high performance and if you are pretty sure + ! that your system is well conditioned, we strongly recommend + ! you to use faster solver, SPDMatrixSolveFast() function. - False, if the bisection method subroutine wasn't able to find the - eigenvalues in the given interval or if the inverse iteration - subroutine wasn't able to find all the corresponding eigenvectors. - In that case, the eigenvalues and eigenvectors are not returned, M is - equal to 0. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -Note: - eigen vectors of Hermitian matrix are defined up to multiplication by - a complex number L, such as |L|=1. 
+INPUT PARAMETERS + A - array[0..N-1,0..N-1], system matrix + N - size of A + IsUpper - what half of A is provided + B - array[0..N-1], right part + +OUTPUT PARAMETERS + Info - return code: + * -3 matrix is very badly conditioned or non-SPD. + * -1 N<=0 was passed + * 1 task is solved (but matrix A may be ill-conditioned, + check R1/RInf parameters for condition numbers). + Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + X - array[N], it contains: + * info>0 => solution + * info=-3 => filled by zeros -- ALGLIB -- - Copyright 07.01.2006, 24.03.2007 by Bochkanov Sergey. + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::hmatrixevdr( - complex_2d_array a, +
    void alglib::spdmatrixsolve( + real_2d_array a, ae_int_t n, - ae_int_t zneeded, bool isupper, - double b1, - double b2, - ae_int_t& m, - real_1d_array& w, - complex_2d_array& z); + real_1d_array b, + ae_int_t& info, + densesolverreport& rep, + real_1d_array& x, + const xparams _params = alglib::xdefault);
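An illustrative sketch of the one-call SPD solver (no explicit factorization step); because IsUpper=true, only the upper triangle of A is read.

#include <cstdio>
#include "solvers.h"

using namespace alglib;

int main()
{
    // SPD solve with condition number estimates in rep.r1/rep.rinf.
    real_2d_array a = "[[4,2],[2,3]]";
    real_1d_array b = "[8,8]";
    ae_int_t info;
    densesolverreport rep;
    real_1d_array x;
    spdmatrixsolve(a, 2, true, b, info, rep, x);
    printf("%d\n", int(info));             // expected: 1
    printf("%s\n", x.tostring(4).c_str()); // expected solution: [1,2]
    return 0;
}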
    - +
     
    /************************************************************************* -Finding eigenvalues and eigenvectors of a general (unsymmetric) matrix +Dense linear solver for A*x=b with N*N real symmetric positive definite +matrix A, N*1 vectors x and b. "Fast-but-lightweight" version of the +solver. -COMMERCIAL EDITION OF ALGLIB: +Algorithm features: +* O(N^3) complexity +* matrix is represented by its upper or lower triangle +* no additional time consuming features like condition number estimation - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. Speed-up provided by MKL for this particular problem (EVD) - ! is really high, because MKL uses combination of (a) better low-level - ! optimizations, and (b) better EVD algorithms. - ! - ! On one particular SSE-capable machine for N=1024, commercial MKL- - ! -capable ALGLIB was: - ! * 7-10 times faster than open source "generic C" version - ! * 15-18 times faster than "pure C#" version + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Multithreaded acceleration is NOT supported for this function. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. -The algorithm finds eigenvalues and eigenvectors of a general matrix by -using the QR algorithm with multiple shifts. The algorithm can find -eigenvalues and both left and right eigenvectors. +INPUT PARAMETERS + A - array[0..N-1,0..N-1], system matrix + N - size of A + IsUpper - what half of A is provided + B - array[0..N-1], right part -The right eigenvector is a vector x such that A*x = w*x, and the left -eigenvector is a vector y such that y'*A = w*y' (here y' implies a complex -conjugate transposition of vector y). +OUTPUT PARAMETERS + Info - return code: + * -3 A is is exactly singular or non-SPD + * -1 N<=0 was passed + * 1 task was solved + B - array[N], it contains: + * info>0 => solution + * info=-3 => filled by zeros -Input parameters: - A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - VNeeded - flag controlling whether eigenvectors are needed or not. - If VNeeded is equal to: - * 0, eigenvectors are not returned; - * 1, right eigenvectors are returned; - * 2, left eigenvectors are returned; - * 3, both left and right eigenvectors are returned. + -- ALGLIB -- + Copyright 17.03.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spdmatrixsolvefast( + real_2d_array a, + ae_int_t n, + bool isupper, + real_1d_array& b, + ae_int_t& info, + const xparams _params = alglib::xdefault); -Output parameters: - WR - real parts of eigenvalues. - Array whose index ranges within [0..N-1]. - WR - imaginary parts of eigenvalues. - Array whose index ranges within [0..N-1]. - VL, VR - arrays of left and right eigenvectors (if they are needed). - If WI[i]=0, the respective eigenvalue is a real number, - and it corresponds to the column number I of matrices VL/VR. - If WI[i]>0, we have a pair of complex conjugate numbers with - positive and negative imaginary parts: - the first eigenvalue WR[i] + sqrt(-1)*WI[i]; - the second eigenvalue WR[i+1] + sqrt(-1)*WI[i+1]; - WI[i]>0 - WI[i+1] = -WI[i] < 0 - In that case, the eigenvector corresponding to the first - eigenvalue is located in i and i+1 columns of matrices - VL/VR (the column number i contains the real part, and the - column number i+1 contains the imaginary part), and the vector - corresponding to the second eigenvalue is a complex conjugate to - the first vector. - Arrays whose indexes range within [0..N-1, 0..N-1]. +
    + +
    +
    /************************************************************************* +Dense solver for A*X=B with N*N symmetric positive definite matrix A, and +N*M vectors X and B. It is "slow-but-feature-rich" version of the solver. -Result: - True, if the algorithm has converged. - False, if the algorithm has not converged. +Algorithm features: +* automatic detection of degenerate cases +* condition number estimation +* O(N^3+M*N^2) complexity +* matrix is represented by its upper or lower triangle -Note 1: - Some users may ask the following question: what if WI[N-1]>0? - WI[N] must contain an eigenvalue which is complex conjugate to the - N-th eigenvalue, but the array has only size N? - The answer is as follows: such a situation cannot occur because the - algorithm finds a pairs of eigenvalues, therefore, if WI[i]>0, I is - strictly less than N-1. +No iterative refinement is provided because such partial representation of +matrix does not allow efficient calculation of extra-precise matrix-vector +products for large matrices. Use RMatrixSolve or RMatrixMixedSolve if you +need iterative refinement. -Note 2: - The algorithm performance depends on the value of the internal parameter - NS of the InternalSchurDecomposition subroutine which defines the number - of shifts in the QR algorithm (similarly to the block width in block-matrix - algorithms of linear algebra). If you require maximum performance - on your machine, it is recommended to adjust this parameter manually. +IMPORTANT: ! this function is NOT the most efficient linear solver provided + ! by ALGLIB. It estimates condition number of linear system, + ! which results in significant performance penalty when + ! compared with "fast" version which just performs Cholesky + ! decomposition and calls triangular solver. + ! + ! This performance penalty is especially visible in the + ! multithreaded mode, because both condition number estimation + ! and iterative refinement are inherently sequential + ! calculations. + ! + ! Thus, if you need high performance and if you are pretty sure + ! that your system is well conditioned, we strongly recommend + ! you to use faster solver, SPDMatrixSolveMFast() function. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -See also the InternalTREVC subroutine. +INPUT PARAMETERS + A - array[0..N-1,0..N-1], system matrix + N - size of A + IsUpper - what half of A is provided + B - array[0..N-1,0..M-1], right part + M - right part size -The algorithm is based on the LAPACK 3.0 library. +OUTPUT PARAMETERS + Info - return code: + * -3 matrix is very badly conditioned or non-SPD. + * -1 N<=0 was passed + * 1 task is solved (but matrix A may be ill-conditioned, + check R1/RInf parameters for condition numbers). 
+ Rep - additional report, following fields are set: + * rep.r1 condition number in 1-norm + * rep.rinf condition number in inf-norm + X - array[N,M], it contains: + * info>0 => solution + * info=-3 => filled by zeros + + -- ALGLIB -- + Copyright 27.01.2010 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::rmatrixevd( +
    void alglib::spdmatrixsolvem( real_2d_array a, ae_int_t n, - ae_int_t vneeded, - real_1d_array& wr, - real_1d_array& wi, - real_2d_array& vl, - real_2d_array& vr); + bool isupper, + real_2d_array b, + ae_int_t m, + ae_int_t& info, + densesolverreport& rep, + real_2d_array& x, + const xparams _params = alglib::xdefault);
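An illustrative sketch with two right parts stored as columns of B, reusing the SPD matrix from the previous example:

#include <cstdio>
#include "solvers.h"

using namespace alglib;

int main()
{
    // SPD solve with two right parts; X gets one solution per column.
    real_2d_array a = "[[4,2],[2,3]]";
    real_2d_array b = "[[8,4],[8,2]]";
    ae_int_t info;
    densesolverreport rep;
    real_2d_array x;
    spdmatrixsolvem(a, 2, true, b, 2, info, rep, x);
    printf("%s\n", x.tostring(4).c_str()); // expected solution: [[1,1],[2,0]]
    return 0;
}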
    - +
     
    /************************************************************************* -Finding the eigenvalues and eigenvectors of a symmetric matrix - -The algorithm finds eigen pairs of a symmetric matrix by reducing it to -tridiagonal form and using the QL/QR algorithm. +Dense solver for A*X=B with N*N symmetric positive definite matrix A, and +N*M vectors X and B. It is "fast-but-lightweight" version of the solver. -COMMERCIAL EDITION OF ALGLIB: +Algorithm features: +* O(N^3+M*N^2) complexity +* matrix is represented by its upper or lower triangle +* no additional time consuming features - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! Multithreaded acceleration is NOT supported for this function. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. -Input parameters: - A - symmetric matrix which is given by its upper or lower - triangular part. - Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - ZNeeded - flag controlling whether the eigenvectors are needed or not. - If ZNeeded is equal to: - * 0, the eigenvectors are not returned; - * 1, the eigenvectors are returned. - IsUpper - storage format. - -Output parameters: - D - eigenvalues in ascending order. - Array whose index ranges within [0..N-1]. - Z - if ZNeeded is equal to: - * 0, Z hasn’t changed; - * 1, Z contains the eigenvectors. - Array whose indexes range within [0..N-1, 0..N-1]. - The eigenvectors are stored in the matrix columns. +INPUT PARAMETERS + A - array[0..N-1,0..N-1], system matrix + N - size of A + IsUpper - what half of A is provided + B - array[0..N-1,0..M-1], right part + M - right part size -Result: - True, if the algorithm has converged. - False, if the algorithm hasn't converged (rare case). +OUTPUT PARAMETERS + Info - return code: + * -3 A is is exactly singular + * -1 N<=0 was passed + * 1 task was solved + B - array[N,M], it contains: + * info>0 => solution + * info=-3 => filled by zeros -- ALGLIB -- - Copyright 2005-2008 by Bochkanov Sergey + Copyright 17.03.2015 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::smatrixevd( +
    void alglib::spdmatrixsolvemfast( real_2d_array a, ae_int_t n, - ae_int_t zneeded, bool isupper, - real_1d_array& d, - real_2d_array& z); + real_2d_array& b, + ae_int_t m, + ae_int_t& info, + const xparams _params = alglib::xdefault);
    - + +
    + +sparsesolverreport
    + +sparsecholeskysolvesks
    +sparselusolve
    +sparsesolve
    +sparsesolvesks
    + + + +
    solvesks_d_1 Solving positive definite sparse system using Skyline (SKS) solver
    +
     
    /************************************************************************* -Subroutine for finding the eigenvalues and eigenvectors of a symmetric -matrix with given indexes by using bisection and inverse iteration methods. +This structure is a sparse solver report. -Input parameters: - A - symmetric matrix which is given by its upper or lower - triangular part. Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - ZNeeded - flag controlling whether the eigenvectors are needed or not. - If ZNeeded is equal to: - * 0, the eigenvectors are not returned; - * 1, the eigenvectors are returned. - IsUpperA - storage format of matrix A. - I1, I2 - index interval for searching (from I1 to I2). - 0 <= I1 <= I2 <= N-1. +Following fields can be accessed by users: +*************************************************************************/ +
    class sparsesolverreport +{ + ae_int_t terminationtype; +}; -Output parameters: - W - array of the eigenvalues found. - Array whose index ranges within [0..I2-I1]. - Z - if ZNeeded is equal to: - * 0, Z hasn’t changed; - * 1, Z contains eigenvectors. - Array whose indexes range within [0..N-1, 0..I2-I1]. - In that case, the eigenvectors are stored in the matrix columns. +
    + +
    +
    /************************************************************************* +Sparse linear solver for A*x=b with N*N real symmetric positive definite +matrix A given by its Cholesky decomposition, and N*1 vectors x and b. -Result: - True, if successful. W contains the eigenvalues, Z contains the - eigenvectors (if needed). +IMPORTANT: this solver requires input matrix to be in the SKS (Skyline) + sparse storage format. An exception will be generated if you + pass matrix in some other format (HASH or CRS). - False, if the bisection method subroutine wasn't able to find the - eigenvalues in the given interval or if the inverse iteration subroutine - wasn't able to find all the corresponding eigenvectors. - In that case, the eigenvalues and eigenvectors are not returned. +INPUT PARAMETERS + A - sparse NxN matrix stored in SKS format, must be NxN exactly + N - size of A, N>0 + IsUpper - which half of A is provided (another half is ignored) + B - array[N], right part + +OUTPUT PARAMETERS + Rep - solver report, following fields are set: + * rep.terminationtype - solver status; >0 for success, + set to -3 on failure (degenerate or non-SPD system). + X - array[N], it contains: + * rep.terminationtype>0 => solution + * rep.terminationtype=-3 => filled by zeros -- ALGLIB -- - Copyright 07.01.2006 by Bochkanov Sergey + Copyright 26.12.2017 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::smatrixevdi( - real_2d_array a, +
    void alglib::sparsecholeskysolvesks( + sparsematrix a, ae_int_t n, - ae_int_t zneeded, bool isupper, - ae_int_t i1, - ae_int_t i2, - real_1d_array& w, - real_2d_array& z); + real_1d_array b, + sparsesolverreport& rep, + real_1d_array& x, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -Subroutine for finding the eigenvalues (and eigenvectors) of a symmetric -matrix in a given half open interval (A, B] by using a bisection and -inverse iteration - -Input parameters: - A - symmetric matrix which is given by its upper or lower - triangular part. Array [0..N-1, 0..N-1]. - N - size of matrix A. - ZNeeded - flag controlling whether the eigenvectors are needed or not. - If ZNeeded is equal to: - * 0, the eigenvectors are not returned; - * 1, the eigenvectors are returned. - IsUpperA - storage format of matrix A. - B1, B2 - half open interval (B1, B2] to search eigenvalues in. +Sparse linear solver for A*x=b with general (nonsymmetric) N*N sparse real +matrix A given by its LU factorization, N*1 vectors x and b. -Output parameters: - M - number of eigenvalues found in a given half-interval (M>=0). - W - array of the eigenvalues found. - Array whose index ranges within [0..M-1]. - Z - if ZNeeded is equal to: - * 0, Z hasn’t changed; - * 1, Z contains eigenvectors. - Array whose indexes range within [0..N-1, 0..M-1]. - The eigenvectors are stored in the matrix columns. +IMPORTANT: this solver requires input matrix to be in the CRS sparse + storage format. An exception will be generated if you pass + matrix in some other format (HASH or SKS). -Result: - True, if successful. M contains the number of eigenvalues in the given - half-interval (could be equal to 0), W contains the eigenvalues, - Z contains the eigenvectors (if needed). +INPUT PARAMETERS + A - LU factorization of the sparse matrix, must be NxN exactly + in CRS storage format + P, Q - pivot indexes from LU factorization + N - size of A, N>0 + B - array[0..N-1], right part - False, if the bisection method subroutine wasn't able to find the - eigenvalues in the given interval or if the inverse iteration subroutine - wasn't able to find all the corresponding eigenvectors. - In that case, the eigenvalues and eigenvectors are not returned, - M is equal to 0. +OUTPUT PARAMETERS + X - array[N], it contains: + * rep.terminationtype>0 => solution + * rep.terminationtype=-3 => filled by zeros + Rep - solver report, following fields are set: + * rep.terminationtype - solver status; >0 for success, + set to -3 on failure (degenerate system). -- ALGLIB -- - Copyright 07.01.2006 by Bochkanov Sergey + Copyright 26.12.2017 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::smatrixevdr( - real_2d_array a, +
    void alglib::sparselusolve( + sparsematrix a, + integer_1d_array p, + integer_1d_array q, ae_int_t n, - ae_int_t zneeded, - bool isupper, - double b1, - double b2, - ae_int_t& m, - real_1d_array& w, - real_2d_array& z); + real_1d_array b, + real_1d_array& x, + sparsesolverreport& rep, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Finding the eigenvalues and eigenvectors of a tridiagonal symmetric matrix +Sparse linear solver for A*x=b with general (nonsymmetric) N*N sparse real +matrix A, N*1 vectors x and b. -The algorithm finds the eigen pairs of a tridiagonal symmetric matrix by -using an QL/QR algorithm with implicit shifts. - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! Multithreaded acceleration is NOT supported for this function. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. - -Input parameters: - D - the main diagonal of a tridiagonal matrix. - Array whose index ranges within [0..N-1]. - E - the secondary diagonal of a tridiagonal matrix. - Array whose index ranges within [0..N-2]. - N - size of matrix A. - ZNeeded - flag controlling whether the eigenvectors are needed or not. - If ZNeeded is equal to: - * 0, the eigenvectors are not needed; - * 1, the eigenvectors of a tridiagonal matrix - are multiplied by the square matrix Z. It is used if the - tridiagonal matrix is obtained by the similarity - transformation of a symmetric matrix; - * 2, the eigenvectors of a tridiagonal matrix replace the - square matrix Z; - * 3, matrix Z contains the first row of the eigenvectors - matrix. - Z - if ZNeeded=1, Z contains the square matrix by which the - eigenvectors are multiplied. - Array whose indexes range within [0..N-1, 0..N-1]. +This solver converts input matrix to CRS format, performs LU factorization +and uses sparse triangular solvers to get solution of the original system. -Output parameters: - D - eigenvalues in ascending order. - Array whose index ranges within [0..N-1]. - Z - if ZNeeded is equal to: - * 0, Z hasn’t changed; - * 1, Z contains the product of a given matrix (from the left) - and the eigenvectors matrix (from the right); - * 2, Z contains the eigenvectors. - * 3, Z contains the first row of the eigenvectors matrix. - If ZNeeded<3, Z is the array whose indexes range within [0..N-1, 0..N-1]. - In that case, the eigenvectors are stored in the matrix columns. - If ZNeeded=3, Z is the array whose indexes range within [0..0, 0..N-1]. +INPUT PARAMETERS + A - sparse matrix, must be NxN exactly, any storage format + N - size of A, N>0 + B - array[0..N-1], right part -Result: - True, if the algorithm has converged. - False, if the algorithm hasn't converged. +OUTPUT PARAMETERS + X - array[N], it contains: + * rep.terminationtype>0 => solution + * rep.terminationtype=-3 => filled by zeros + Rep - solver report, following fields are set: + * rep.terminationtype - solver status; >0 for success, + set to -3 on failure (degenerate system). - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 + -- ALGLIB -- + Copyright 26.12.2017 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::smatrixtdevd( - real_1d_array& d, - real_1d_array e, +
    void alglib::sparsesolve( + sparsematrix a, ae_int_t n, - ae_int_t zneeded, - real_2d_array& z); + real_1d_array b, + real_1d_array& x, + sparsesolverreport& rep, + const xparams _params = alglib::xdefault);
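An illustrative sketch (not taken from the manual): the matrix is created in the default hash-table storage format and filled with sparseset(); sparsesolve() converts and factors it internally, as described above.

#include <cstdio>
#include "solvers.h"

using namespace alglib;

int main()
{
    // General sparse 3x3 system; no manual conversion to CRS is required.
    ae_int_t n = 3;
    sparsematrix a;
    sparsecreate(n, n, a);
    sparseset(a, 0, 0, 2.0);
    sparseset(a, 0, 2, 1.0);
    sparseset(a, 1, 1, 3.0);
    sparseset(a, 2, 0, 1.0);
    sparseset(a, 2, 2, 4.0);
    real_1d_array b = "[5,6,6]";
    real_1d_array x;
    sparsesolverreport rep;
    sparsesolve(a, n, b, x, rep);
    printf("%s\n", x.tostring(4).c_str()); // expected solution: [2,2,1]
    return 0;
}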
    - +
     
    /************************************************************************* -Subroutine for finding tridiagonal matrix eigenvalues/vectors with given -indexes (in ascending order) by using the bisection and inverse iteraion. - -Input parameters: - D - the main diagonal of a tridiagonal matrix. - Array whose index ranges within [0..N-1]. - E - the secondary diagonal of a tridiagonal matrix. - Array whose index ranges within [0..N-2]. - N - size of matrix. N>=0. - ZNeeded - flag controlling whether the eigenvectors are needed or not. - If ZNeeded is equal to: - * 0, the eigenvectors are not needed; - * 1, the eigenvectors of a tridiagonal matrix are multiplied - by the square matrix Z. It is used if the - tridiagonal matrix is obtained by the similarity transformation - of a symmetric matrix. - * 2, the eigenvectors of a tridiagonal matrix replace - matrix Z. - I1, I2 - index interval for searching (from I1 to I2). - 0 <= I1 <= I2 <= N-1. - Z - if ZNeeded is equal to: - * 0, Z isn't used and remains unchanged; - * 1, Z contains the square matrix (array whose indexes range within [0..N-1, 0..N-1]) - which reduces the given symmetric matrix to tridiagonal form; - * 2, Z isn't used (but changed on the exit). - -Output parameters: - D - array of the eigenvalues found. - Array whose index ranges within [0..I2-I1]. - Z - if ZNeeded is equal to: - * 0, doesn't contain any information; - * 1, contains the product of a given NxN matrix Z (from the left) and - Nx(I2-I1) matrix of the eigenvectors found (from the right). - Array whose indexes range within [0..N-1, 0..I2-I1]. - * 2, contains the matrix of the eigenvalues found. - Array whose indexes range within [0..N-1, 0..I2-I1]. +Sparse linear solver for A*x=b with N*N sparse real symmetric positive +definite matrix A, N*1 vectors x and b. +This solver converts input matrix to SKS format, performs Cholesky +factorization using SKS Cholesky subroutine (works well for limited +bandwidth matrices) and uses sparse triangular solvers to get solution of +the original system. -Result: - - True, if successful. In that case, D contains the eigenvalues, - Z contains the eigenvectors (if needed). - It should be noted that the subroutine changes the size of arrays D and Z. +INPUT PARAMETERS + A - sparse matrix, must be NxN exactly + N - size of A, N>0 + IsUpper - which half of A is provided (another half is ignored) + B - array[0..N-1], right part - False, if the bisection method subroutine wasn't able to find the eigenvalues - in the given interval or if the inverse iteration subroutine wasn't able - to find all the corresponding eigenvectors. In that case, the eigenvalues - and eigenvectors are not returned. +OUTPUT PARAMETERS + Rep - solver report, following fields are set: + * rep.terminationtype - solver status; >0 for success, + set to -3 on failure (degenerate or non-SPD system). + X - array[N], it contains: + * rep.terminationtype>0 => solution + * rep.terminationtype=-3 => filled by zeros -- ALGLIB -- - Copyright 25.12.2005 by Bochkanov Sergey + Copyright 26.12.2017 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::smatrixtdevdi( - real_1d_array& d, - real_1d_array e, +
    void alglib::sparsesolvesks( + sparsematrix a, ae_int_t n, - ae_int_t zneeded, - ae_int_t i1, - ae_int_t i2, - real_2d_array& z); + bool isupper, + real_1d_array b, + sparsesolverreport& rep, + real_1d_array& x, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
    -
    /************************************************************************* -Subroutine for finding the tridiagonal matrix eigenvalues/vectors in a -given half-interval (A, B] by using bisection and inverse iteration. - -Input parameters: - D - the main diagonal of a tridiagonal matrix. - Array whose index ranges within [0..N-1]. - E - the secondary diagonal of a tridiagonal matrix. - Array whose index ranges within [0..N-2]. - N - size of matrix, N>=0. - ZNeeded - flag controlling whether the eigenvectors are needed or not. - If ZNeeded is equal to: - * 0, the eigenvectors are not needed; - * 1, the eigenvectors of a tridiagonal matrix are multiplied - by the square matrix Z. It is used if the tridiagonal - matrix is obtained by the similarity transformation - of a symmetric matrix. - * 2, the eigenvectors of a tridiagonal matrix replace matrix Z. - A, B - half-interval (A, B] to search eigenvalues in. - Z - if ZNeeded is equal to: - * 0, Z isn't used and remains unchanged; - * 1, Z contains the square matrix (array whose indexes range - within [0..N-1, 0..N-1]) which reduces the given symmetric - matrix to tridiagonal form; - * 2, Z isn't used (but changed on the exit). +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "solvers.h" -Output parameters: - D - array of the eigenvalues found. - Array whose index ranges within [0..M-1]. - M - number of eigenvalues found in the given half-interval (M>=0). - Z - if ZNeeded is equal to: - * 0, doesn't contain any information; - * 1, contains the product of a given NxN matrix Z (from the - left) and NxM matrix of the eigenvectors found (from the - right). Array whose indexes range within [0..N-1, 0..M-1]. - * 2, contains the matrix of the eigenvectors found. - Array whose indexes range within [0..N-1, 0..M-1]. +using namespace alglib; -Result: - True, if successful. In that case, M contains the number of eigenvalues - in the given half-interval (could be equal to 0), D contains the eigenvalues, - Z contains the eigenvectors (if needed). - It should be noted that the subroutine changes the size of arrays D and Z. +int main(int argc, char **argv) +{ + // + // This example demonstrates creation/initialization of the sparse matrix + // in the SKS (Skyline) storage format and solution using SKS-based direct + // solver. + // + // First, we have to create matrix and initialize it. Matrix is created + // in the SKS format, using fixed bandwidth initialization function. + // Several points should be noted: + // + // 1. SKS sparse storage format also allows variable bandwidth matrices; + // we just do not want to overcomplicate this example. + // + // 2. SKS format requires you to specify matrix geometry prior to + // initialization of its elements with sparseset(). If you specified + // bandwidth=1, you can not change your mind afterwards and call + // sparseset() for non-existent elements. + // + // 3. Because SKS solver need just one triangle of SPD matrix, we can + // omit initialization of the lower triangle of our matrix. 
+ // + ae_int_t n = 4; + ae_int_t bandwidth = 1; + sparsematrix s; + sparsecreatesksband(n, n, bandwidth, s); + sparseset(s, 0, 0, 2.0); + sparseset(s, 0, 1, 1.0); + sparseset(s, 1, 1, 3.0); + sparseset(s, 1, 2, 1.0); + sparseset(s, 2, 2, 3.0); + sparseset(s, 2, 3, 1.0); + sparseset(s, 3, 3, 2.0); - False, if the bisection method subroutine wasn't able to find the - eigenvalues in the given interval or if the inverse iteration subroutine - wasn't able to find all the corresponding eigenvectors. In that case, - the eigenvalues and eigenvectors are not returned, M is equal to 0. + // + // Now we have a symmetric positive definite 4x4 system with bandwidth=1: + // + // [ 2 1 ] [ x0 ] [ 4 ] + // [ 1 3 1 ] [ x1 ] [ 10 ] + // [ 1 3 1 ] * [ x2 ] = [ 15 ] + // [ 1 2 ] [ x3 ] [ 11 ] + // + // After successful creation we can call the SKS solver. + // + real_1d_array b = "[4,10,15,11]"; + sparsesolverreport rep; + real_1d_array x; + bool isuppertriangle = true; + sparsesolvesks(s, n, isuppertriangle, b, rep, x); + printf("%s\n", x.tostring(4).c_str()); // EXPECTED: [1.0000, 2.0000, 3.0000, 4.0000] + return 0; +} - -- ALGLIB -- - Copyright 31.03.2008 by Bochkanov Sergey -*************************************************************************/ -
    bool alglib::smatrixtdevdr( - real_1d_array& d, - real_1d_array e, - ae_int_t n, - ae_int_t zneeded, - double a, - double b, - ae_int_t& m, - real_2d_array& z); -
    - + - +
     
    /************************************************************************* -Exponential integral Ei(x) +Complete elliptic integral of the second kind - x - - t - | | e - Ei(x) = -|- --- dt . - | | t - - - -inf +Approximates the integral -Not defined for x <= 0. -See also expn.c. + pi/2 + - + | | 2 +E(m) = | sqrt( 1 - m sin t ) dt + | | + - + 0 + +using the approximation + P(x) - x log x Q(x). ACCURACY: Relative error: arithmetic domain # trials peak rms - IEEE 0,100 50000 8.6e-16 1.3e-16 + IEEE 0, 1 10000 2.1e-16 7.3e-17 -Cephes Math Library Release 2.8: May, 1999 -Copyright 1999 by Stephen L. Moshier +Cephes Math Library, Release 2.8: June, 2000 +Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::exponentialintegralei(double x); +
    double alglib::ellipticintegrale( + double m, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Exponential integral En(x) +Complete elliptic integral of the first kind -Evaluates the exponential integral +Approximates the integral - inf. - - - | | -xt - | e - E (x) = | ---- dt. - n | n - | | t - - - 1 -Both n and x must be nonnegative. + pi/2 + - + | | + | dt +K(m) = | ------------------ + | 2 + | | sqrt( 1 - m sin t ) + - + 0 -The routine employs either a power series, a continued -fraction, or an asymptotic formula depending on the -relative values of n and x. +using the approximation + + P(x) - log x Q(x). ACCURACY: Relative error: arithmetic domain # trials peak rms - IEEE 0, 30 10000 1.7e-15 3.6e-16 + IEEE 0,1 30000 2.5e-16 6.8e-17 -Cephes Math Library Release 2.8: June, 2000 -Copyright 1985, 2000 by Stephen L. Moshier +Cephes Math Library, Release 2.8: June, 2000 +Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::exponentialintegralen(double x, ae_int_t n); +
    double alglib::ellipticintegralk( + double m, + const xparams _params = alglib::xdefault);
    - -
    - -fcdistribution
    -fdistribution
    -invfdistribution
    - - -
    - +
     
    /************************************************************************* -Complemented F distribution +Complete elliptic integral of the first kind -Returns the area from x to infinity under the F density -function (also known as Snedcor's density or the -variance ratio density). +Approximates the integral - inf. - - - 1 | | a-1 b-1 -1-P(x) = ------ | t (1-t) dt - B(a,b) | | - - - x + pi/2 + - + | | + | dt +K(m) = | ------------------ + | 2 + | | sqrt( 1 - m sin t ) + - + 0 + +where m = 1 - m1, using the approximation -The incomplete beta integral is used, according to the -formula + P(x) - log x Q(x). -P(x) = incbet( df2/2, df1/2, (df2/(df2 + df1*x) ). +The argument m1 is used rather than m so that the logarithmic +singularity at m = 1 will be shifted to the origin; this +preserves maximum accuracy. +K(0) = pi/2. ACCURACY: -Tested at random points (a,b,x) in the indicated intervals. - x a,b Relative error: -arithmetic domain domain # trials peak rms - IEEE 0,1 1,100 100000 3.7e-14 5.9e-16 - IEEE 1,5 1,100 100000 8.0e-15 1.6e-15 - IEEE 0,1 1,10000 100000 1.8e-11 3.5e-13 - IEEE 1,5 1,10000 100000 2.0e-11 3.0e-12 + Relative error: +arithmetic domain # trials peak rms + IEEE 0,1 30000 2.5e-16 6.8e-17 -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier +Cephes Math Library, Release 2.8: June, 2000 +Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::fcdistribution(ae_int_t a, ae_int_t b, double x); +
    double alglib::ellipticintegralkhighprecision( + double m1, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -F distribution +Incomplete elliptic integral of the second kind -Returns the area from zero to x under the F density -function (also known as Snedcor's density or the -variance ratio density). This is the density -of x = (u1/df1)/(u2/df2), where u1 and u2 are random -variables having Chi square distributions with df1 -and df2 degrees of freedom, respectively. -The incomplete beta integral is used, according to the -formula +Approximates the integral -P(x) = incbet( df1/2, df2/2, (df1*x/(df2 + df1*x) ). + phi + - + | | + | 2 +E(phi_\m) = | sqrt( 1 - m sin t ) dt + | + | | + - + 0 -The arguments a and b are greater than zero, and x is -nonnegative. +of amplitude phi and modulus m, using the arithmetic - +geometric mean algorithm. ACCURACY: -Tested at random points (a,b,x). - - x a,b Relative error: -arithmetic domain domain # trials peak rms - IEEE 0,1 0,100 100000 9.8e-15 1.7e-15 - IEEE 1,5 0,100 100000 6.5e-15 3.5e-16 - IEEE 0,1 1,10000 100000 2.2e-11 3.3e-12 - IEEE 1,5 1,10000 100000 1.1e-11 1.7e-13 +Tested at random arguments with phi in [-10, 10] and m in +[0, 1]. + Relative error: +arithmetic domain # trials peak rms + IEEE -10,10 150000 3.3e-15 1.4e-16 Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier +Copyright 1984, 1987, 1993, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::fdistribution(ae_int_t a, ae_int_t b, double x); +
    double alglib::incompleteellipticintegrale( + double phi, + double m, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Inverse of complemented F distribution - -Finds the F density argument x such that the integral -from x to infinity of the F density is equal to the -given probability p. - -This is accomplished using the inverse beta integral -function and the relations - - z = incbi( df2/2, df1/2, p ) - x = df2 (1-z) / (df1 z). - -Note: the following relations hold for the inverse of -the uncomplemented F distribution: +Incomplete elliptic integral of the first kind F(phi|m) - z = incbi( df1/2, df2/2, p ) - x = df2 z / (df1 (1-z)). +Approximates the integral -ACCURACY: -Tested at random points (a,b,p). - a,b Relative error: -arithmetic domain # trials peak rms - For p between .001 and 1: - IEEE 1,100 100000 8.3e-15 4.7e-16 - IEEE 1,10000 100000 2.1e-11 1.4e-13 - For p between 10^-6 and 10^-3: - IEEE 1,100 50000 1.3e-12 8.4e-15 - IEEE 1,10000 50000 3.0e-12 4.8e-14 + phi + - + | | + | dt +F(phi_\m) = | ------------------ + | 2 + | | sqrt( 1 - m sin t ) + - + 0 + +of amplitude phi and modulus m, using the arithmetic - +geometric mean algorithm. + + + + +ACCURACY: + +Tested at random points with m in [0, 1] and phi as indicated. + + Relative error: +arithmetic domain # trials peak rms + IEEE -10,10 200000 7.4e-16 1.0e-16 Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier +Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::invfdistribution(ae_int_t a, ae_int_t b, double y); +
    double alglib::incompleteellipticintegralk( + double phi, + double m, + const xparams _params = alglib::xdefault);
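The four elliptic-integral routines above share one parameter convention (modulus m, amplitude phi), so each complete integral is the corresponding incomplete integral evaluated at phi = pi/2, and K(0) = E(0) = pi/2. The following is an editorial sketch illustrating those identities; it is not part of the upstream manual and assumes the specialfunctions.h header of the C++ distribution together with the signatures shown above.

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // The complete integrals coincide with the incomplete ones at phi=pi/2
    // (up to rounding), and K(0)=E(0)=pi/2, i.e. about 1.570796.
    //
    double m   = 0.5;
    double phi = acos(-1.0)/2;      // pi/2
    printf("%.6f %.6f\n", ellipticintegralk(m), incompleteellipticintegralk(phi, m));
    printf("%.6f %.6f\n", ellipticintegrale(m), incompleteellipticintegrale(phi, m));
    printf("%.6f %.6f\n", ellipticintegralk(0.0), ellipticintegrale(0.0)); // both pi/2
    return 0;
}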
    - + - +
     
    /************************************************************************* -1-dimensional complex FFT. - -Array size N may be arbitrary number (composite or prime). Composite N's -are handled with cache-oblivious variation of a Cooley-Tukey algorithm. -Small prime-factors are transformed using hard coded codelets (similar to -FFTW codelets, but without low-level optimization), large prime-factors -are handled with Bluestein's algorithm. - -Fastests transforms are for smooth N's (prime factors are 2, 3, 5 only), -most fast for powers of 2. When N have prime factors larger than these, -but orders of magnitude smaller than N, computations will be about 4 times -slower than for nearby highly composite N's. When N itself is prime, speed -will be 6 times lower. - -Algorithm has O(N*logN) complexity for any N (composite or prime). - -INPUT PARAMETERS - A - array[0..N-1] - complex function to be transformed - N - problem size - -OUTPUT PARAMETERS - A - DFT of a input array, array[0..N-1] - A_out[j] = SUM(A_in[k]*exp(-2*pi*sqrt(-1)*j*k/N), k = 0..N-1) - +This object stores state of the subspace iteration algorithm. - -- ALGLIB -- - Copyright 29.05.2009 by Bochkanov Sergey +You should use ALGLIB functions to work with this object. *************************************************************************/ -
    void alglib::fftc1d(complex_1d_array& a); -void alglib::fftc1d(complex_1d_array& a, ae_int_t n); +
    class eigsubspacereport +{ + ae_int_t iterationscount; +};
    -

    Examples:   [1]  [2]  

    - +
     
    /************************************************************************* -1-dimensional complex inverse FFT. +This object stores state of the subspace iteration algorithm. -Array size N may be arbitrary number (composite or prime). Algorithm has -O(N*logN) complexity for any N (composite or prime). +You should use ALGLIB functions to work with this object. +*************************************************************************/ +
    class eigsubspacestate +{ +}; -See FFTC1D() description for more information about algorithm performance. +
    + +
    +
    /************************************************************************* +This function initializes subspace iteration solver. This solver is used +to solve symmetric real eigenproblems where just a few (top K) eigenvalues +and corresponding eigenvectors is required. + +This solver can be significantly faster than complete EVD decomposition +in the following case: +* when only just a small fraction of top eigenpairs of dense matrix is + required. When K approaches N, this solver is slower than complete dense + EVD +* when problem matrix is sparse (and/or is not known explicitly, i.e. only + matrix-matrix product can be performed) + +USAGE (explicit dense/sparse matrix): +1. User initializes algorithm state with eigsubspacecreate() call +2. [optional] User tunes solver parameters by calling eigsubspacesetcond() + or other functions +3. User calls eigsubspacesolvedense() or eigsubspacesolvesparse() methods, + which take algorithm state and 2D array or alglib.sparsematrix object. + +USAGE (out-of-core mode): +1. User initializes algorithm state with eigsubspacecreate() call +2. [optional] User tunes solver parameters by calling eigsubspacesetcond() + or other functions +3. User activates out-of-core mode of the solver and repeatedly calls + communication functions in a loop like below: + > alglib.eigsubspaceoocstart(state) + > while alglib.eigsubspaceooccontinue(state) do + > alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M) + > alglib.eigsubspaceoocgetrequestdata(state, out X) + > [calculate Y=A*X, with X=R^NxM] + > alglib.eigsubspaceoocsendresult(state, in Y) + > alglib.eigsubspaceoocstop(state, out W, out Z, out Report) -INPUT PARAMETERS - A - array[0..N-1] - complex array to be transformed - N - problem size +INPUT PARAMETERS: + N - problem dimensionality, N>0 + K - number of top eigenvector to calculate, 0<K<=N. -OUTPUT PARAMETERS - A - inverse DFT of a input array, array[0..N-1] - A_out[j] = SUM(A_in[k]/N*exp(+2*pi*sqrt(-1)*j*k/N), k = 0..N-1) +OUTPUT PARAMETERS: + State - structure which stores algorithm state +NOTE: if you solve many similar EVD problems you may find it useful to + reuse previous subspace as warm-start point for new EVD problem. It + can be done with eigsubspacesetwarmstart() function. -- ALGLIB -- - Copyright 29.05.2009 by Bochkanov Sergey + Copyright 16.01.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::fftc1dinv(complex_1d_array& a); -void alglib::fftc1dinv(complex_1d_array& a, ae_int_t n); +
    void alglib::eigsubspacecreate( + ae_int_t n, + ae_int_t k, + eigsubspacestate& state, + const xparams _params = alglib::xdefault);
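The dense branch of the usage scheme above (create the solver, optionally tune it, then solve) can be shown with a small editorial sketch. It is not part of the upstream manual; it assumes the linalg.h header of the C++ distribution and uses only calls whose signatures appear in this section (eigsubspacecreate, eigsubspacesetcond and eigsubspacesolvedenses, the latter two documented further below). The 3x3 test matrix and the expected top eigenvalue (7+sqrt(5))/2, about 4.618, are the editor's own.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Find the top K=1 eigenpair of a small dense symmetric matrix with the
    // subspace iteration solver (steps 1-3 of the dense/sparse usage scheme).
    //
    ae_int_t n = 3;
    ae_int_t k = 1;
    real_2d_array a = "[[4,1,0],[1,3,0],[0,0,1]]";
    eigsubspacestate state;
    eigsubspacereport rep;
    real_1d_array w;
    real_2d_array z;
    eigsubspacecreate(n, k, state);
    eigsubspacesetcond(state, 1.0e-9, 0);           // eps-based stopping criterion
    eigsubspacesolvedenses(state, a, true, w, z, rep);
    printf("%s\n", w.tostring(4).c_str());          // largest eigenvalue, about 4.6180
    return 0;
}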
    -

    Examples:   [1]  [2]  

    - +
     
    /************************************************************************* -1-dimensional real FFT. - -Algorithm has O(N*logN) complexity for any N (composite or prime). +Buffered version of constructor which aims to reuse previously allocated +memory as much as possible. -INPUT PARAMETERS - A - array[0..N-1] - real function to be transformed - N - problem size + -- ALGLIB -- + Copyright 16.01.2017 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::eigsubspacecreatebuf( + ae_int_t n, + ae_int_t k, + eigsubspacestate state, + const xparams _params = alglib::xdefault); -OUTPUT PARAMETERS - F - DFT of a input array, array[0..N-1] - F[j] = SUM(A[k]*exp(-2*pi*sqrt(-1)*j*k/N), k = 0..N-1) +
    + +
    +
    /************************************************************************* +This function performs subspace iteration in the out-of-core mode. It +should be used in conjunction with other out-of-core-related functions of +this subspackage in a loop like below: -NOTE: - F[] satisfies symmetry property F[k] = conj(F[N-k]), so just one half -of array is usually needed. But for convinience subroutine returns full -complex array (with frequencies above N/2), so its result may be used by -other FFT-related subroutines. +> alglib.eigsubspaceoocstart(state) +> while alglib.eigsubspaceooccontinue(state) do +> alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M) +> alglib.eigsubspaceoocgetrequestdata(state, out X) +> [calculate Y=A*X, with X=R^NxM] +> alglib.eigsubspaceoocsendresult(state, in Y) +> alglib.eigsubspaceoocstop(state, out W, out Z, out Report) -- ALGLIB -- - Copyright 01.06.2009 by Bochkanov Sergey + Copyright 16.01.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::fftr1d(real_1d_array a, complex_1d_array& f); -void alglib::fftr1d(real_1d_array a, ae_int_t n, complex_1d_array& f); +
    bool alglib::eigsubspaceooccontinue( + eigsubspacestate state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - +
     
    /************************************************************************* -1-dimensional real inverse FFT. - -Algorithm has O(N*logN) complexity for any N (composite or prime). - -INPUT PARAMETERS - F - array[0..floor(N/2)] - frequencies from forward real FFT - N - problem size +This function is used to retrieve information about out-of-core request +sent by solver to user code: matrix X (array[N,RequestSize) which have to +be multiplied by out-of-core matrix A in a product A*X. -OUTPUT PARAMETERS - A - inverse DFT of a input array, array[0..N-1] +This function returns just request data; in order to get size of the data +prior to processing requestm, use eigsubspaceoocgetrequestinfo(). -NOTE: - F[] should satisfy symmetry property F[k] = conj(F[N-k]), so just one -half of frequencies array is needed - elements from 0 to floor(N/2). F[0] -is ALWAYS real. If N is even F[floor(N/2)] is real too. If N is odd, then -F[floor(N/2)] has no special properties. +It should be used in conjunction with other out-of-core-related functions +of this subspackage in a loop like below: -Relying on properties noted above, FFTR1DInv subroutine uses only elements -from 0th to floor(N/2)-th. It ignores imaginary part of F[0], and in case -N is even it ignores imaginary part of F[floor(N/2)] too. +> alglib.eigsubspaceoocstart(state) +> while alglib.eigsubspaceooccontinue(state) do +> alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M) +> alglib.eigsubspaceoocgetrequestdata(state, out X) +> [calculate Y=A*X, with X=R^NxM] +> alglib.eigsubspaceoocsendresult(state, in Y) +> alglib.eigsubspaceoocstop(state, out W, out Z, out Report) -When you call this function using full arguments list - "FFTR1DInv(F,N,A)" -- you can pass either either frequencies array with N elements or reduced -array with roughly N/2 elements - subroutine will successfully transform -both. +INPUT PARAMETERS: + State - solver running in out-of-core mode + X - possibly preallocated storage; reallocated if + needed, left unchanged, if large enough to store + request data. -If you call this function using reduced arguments list - "FFTR1DInv(F,A)" -- you must pass FULL array with N elements (although higher N/2 are still -not used) because array size is used to automatically determine FFT length +OUTPUT PARAMETERS: + X - array[N,RequestSize] or larger, leading rectangle + is filled with dense matrix X. -- ALGLIB -- - Copyright 01.06.2009 by Bochkanov Sergey + Copyright 16.01.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::fftr1dinv(complex_1d_array f, real_1d_array& a); -void alglib::fftr1dinv(complex_1d_array f, ae_int_t n, real_1d_array& a); +
    void alglib::eigsubspaceoocgetrequestdata( + eigsubspacestate state, + real_2d_array& x, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "fasttransforms.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // first we demonstrate forward FFT:
    -    // [1i,1i,1i,1i] is converted to [4i, 0, 0, 0]
    -    //
    -    complex_1d_array z = "[1i,1i,1i,1i]";
    -    fftc1d(z);
    -    printf("%s\n", z.tostring(3).c_str()); // EXPECTED: [4i,0,0,0]
    -
    -    //
    -    // now we convert [4i, 0, 0, 0] back to [1i,1i,1i,1i]
    -    // with backward FFT
    -    //
    -    fftc1dinv(z);
    -    printf("%s\n", z.tostring(3).c_str()); // EXPECTED: [1i,1i,1i,1i]
    -    return 0;
    -}
    +
    /************************************************************************* +This function is used to retrieve information about out-of-core request +sent by solver to user code: request type (current version of the solver +sends only requests for matrix-matrix products) and request size (size of +the matrices being multiplied). +This function returns just request metrics; in order to get contents of +the matrices being multiplied, use eigsubspaceoocgetrequestdata(). -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "fasttransforms.h"
    +It should be used in conjunction with other out-of-core-related  functions
+of this subpackage in a loop like below:
     
    -using namespace alglib;
    +> alglib.eigsubspaceoocstart(state)
    +> while alglib.eigsubspaceooccontinue(state) do
    +>     alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M)
    +>     alglib.eigsubspaceoocgetrequestdata(state, out X)
    +>     [calculate  Y=A*X, with X=R^NxM]
    +>     alglib.eigsubspaceoocsendresult(state, in Y)
    +> alglib.eigsubspaceoocstop(state, out W, out Z, out Report)
     
    +INPUT PARAMETERS:
    +    State           -   solver running in out-of-core mode
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // first we demonstrate forward FFT:
    -    // [0,1,0,1i] is converted to [1+1i, -1-1i, -1-1i, 1+1i]
    -    //
    -    complex_1d_array z = "[0,1,0,1i]";
    -    fftc1d(z);
    -    printf("%s\n", z.tostring(3).c_str()); // EXPECTED: [1+1i, -1-1i, -1-1i, 1+1i]
    +OUTPUT PARAMETERS:
+    RequestType     -   type of the request to process:
+                        * 0 - for matrix-matrix product A*X, with A being
+                          the NxN matrix whose eigenvalues/vectors are
+                          needed, and X being the NxRequestSize matrix
+                          returned by eigsubspaceoocgetrequestdata().
+    RequestSize     -   size of the X matrix (number of columns); it is
+                        usually several times larger than the number of
+                        vectors K requested by the user.
     
    -    //
    -    // now we convert result back with backward FFT
    -    //
    -    fftc1dinv(z);
    -    printf("%s\n", z.tostring(3).c_str()); // EXPECTED: [0,1,0,1i]
    -    return 0;
    -}
     
    +  -- ALGLIB --
    +     Copyright 16.01.2017 by Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::eigsubspaceoocgetrequestinfo( + eigsubspacestate state, + ae_int_t& requesttype, + ae_int_t& requestsize, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "fasttransforms.h"
    +
    /************************************************************************* +This function is used to send user reply to out-of-core request sent by +solver. Usually it is product A*X for returned by solver matrix X. -using namespace alglib; +It should be used in conjunction with other out-of-core-related functions +of this subspackage in a loop like below: +> alglib.eigsubspaceoocstart(state) +> while alglib.eigsubspaceooccontinue(state) do +> alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M) +> alglib.eigsubspaceoocgetrequestdata(state, out X) +> [calculate Y=A*X, with X=R^NxM] +> alglib.eigsubspaceoocsendresult(state, in Y) +> alglib.eigsubspaceoocstop(state, out W, out Z, out Report) -int main(int argc, char **argv) -{ - // - // first we demonstrate forward FFT: - // [1,1,1,1] is converted to [4, 0, 0, 0] - // - real_1d_array x = "[1,1,1,1]"; - complex_1d_array f; - real_1d_array x2; - fftr1d(x, f); - printf("%s\n", f.tostring(3).c_str()); // EXPECTED: [4,0,0,0] +INPUT PARAMETERS: + State - solver running in out-of-core mode + AX - array[N,RequestSize] or larger, leading rectangle + is filled with product A*X. - // - // now we convert [4, 0, 0, 0] back to [1,1,1,1] - // with backward FFT - // - fftr1dinv(f, x2); - printf("%s\n", x2.tostring(3).c_str()); // EXPECTED: [1,1,1,1] - return 0; -} + -- ALGLIB -- + Copyright 16.01.2017 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::eigsubspaceoocsendresult( + eigsubspacestate state, + real_2d_array ax, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "fasttransforms.h"
    -
    -using namespace alglib;
    -
    +
    /************************************************************************* +This function initiates out-of-core mode of subspace eigensolver. It +should be used in conjunction with other out-of-core-related functions of +this subspackage in a loop like below: -int main(int argc, char **argv) -{ - // - // first we demonstrate forward FFT: - // [1,2,3,4] is converted to [10, -2+2i, -2, -2-2i] - // - // note that output array is self-adjoint: - // * f[0] = conj(f[0]) - // * f[1] = conj(f[3]) - // * f[2] = conj(f[2]) - // - real_1d_array x = "[1,2,3,4]"; - complex_1d_array f; - real_1d_array x2; - fftr1d(x, f); - printf("%s\n", f.tostring(3).c_str()); // EXPECTED: [10, -2+2i, -2, -2-2i] +> alglib.eigsubspaceoocstart(state) +> while alglib.eigsubspaceooccontinue(state) do +> alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M) +> alglib.eigsubspaceoocgetrequestdata(state, out X) +> [calculate Y=A*X, with X=R^NxM] +> alglib.eigsubspaceoocsendresult(state, in Y) +> alglib.eigsubspaceoocstop(state, out W, out Z, out Report) - // - // now we convert [10, -2+2i, -2, -2-2i] back to [1,2,3,4] - // - fftr1dinv(f, x2); - printf("%s\n", x2.tostring(3).c_str()); // EXPECTED: [1,2,3,4] +INPUT PARAMETERS: + State - solver object + MType - matrix type: + * 0 for real symmetric matrix (solver assumes that + matrix being processed is symmetric; symmetric + direct eigensolver is used for smaller subproblems + arising during solution of larger "full" task) + Future versions of ALGLIB may introduce support for + other matrix types; for now, only symmetric + eigenproblems are supported. - // - // remember that F is self-adjoint? It means that we can pass just half - // (slightly larger than half) of F to inverse real FFT and still get our result. - // - // I.e. instead [10, -2+2i, -2, -2-2i] we pass just [10, -2+2i, -2] and everything works! - // - // NOTE: in this case we should explicitly pass array length (which is 4) to ALGLIB; - // if not, it will automatically use array length to determine FFT size and - // will erroneously make half-length FFT. - // - f = "[10, -2+2i, -2]"; - fftr1dinv(f, 4, x2); - printf("%s\n", x2.tostring(3).c_str()); // EXPECTED: [1,2,3,4] - return 0; -} + -- ALGLIB -- + Copyright 16.01.2017 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::eigsubspaceoocstart( + eigsubspacestate state, + ae_int_t mtype, + const xparams _params = alglib::xdefault); -
    -
    - -fhtr1d
    -fhtr1dinv
    - - -
    - + +
     
    /************************************************************************* -1-dimensional Fast Hartley Transform. - -Algorithm has O(N*logN) complexity for any N (composite or prime). +This function finalizes out-of-core mode of subspace eigensolver. It +should be used in conjunction with other out-of-core-related functions of +this subspackage in a loop like below: -INPUT PARAMETERS - A - array[0..N-1] - real function to be transformed - N - problem size +> alglib.eigsubspaceoocstart(state) +> while alglib.eigsubspaceooccontinue(state) do +> alglib.eigsubspaceoocgetrequestinfo(state, out RequestType, out M) +> alglib.eigsubspaceoocgetrequestdata(state, out X) +> [calculate Y=A*X, with X=R^NxM] +> alglib.eigsubspaceoocsendresult(state, in Y) +> alglib.eigsubspaceoocstop(state, out W, out Z, out Report) -OUTPUT PARAMETERS - A - FHT of a input array, array[0..N-1], - A_out[k] = sum(A_in[j]*(cos(2*pi*j*k/N)+sin(2*pi*j*k/N)), j=0..N-1) +INPUT PARAMETERS: + State - solver state +OUTPUT PARAMETERS: + W - array[K], depending on solver settings: + * top K eigenvalues ordered by descending - if + eigenvectors are returned in Z + * zeros - if invariant subspace is returned in Z + Z - array[N,K], depending on solver settings either: + * matrix of eigenvectors found + * orthogonal basis of K-dimensional invariant subspace + Rep - report with additional parameters -- ALGLIB -- - Copyright 04.06.2009 by Bochkanov Sergey + Copyright 16.01.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::fhtr1d(real_1d_array& a, ae_int_t n); +
    void alglib::eigsubspaceoocstop( + eigsubspacestate state, + real_1d_array& w, + real_2d_array& z, + eigsubspacereport& rep, + const xparams _params = alglib::xdefault);
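The out-of-core protocol spelled out in the pseudocode above maps directly onto the C++ calls documented in this section. The following editorial sketch (not part of the upstream manual) drives the loop with a small dense matrix standing in for the "out-of-core" operator, so the only user-side work is forming Y=A*X; it assumes the linalg.h header, and the test matrix and expected result are the editor's own.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    ae_int_t n = 3, k = 1;
    real_2d_array a = "[[4,1,0],[1,3,0],[0,0,1]]"; // stands in for the out-of-core operator
    eigsubspacestate state;
    eigsubspacecreate(n, k, state);
    eigsubspaceoocstart(state, 0);                  // mtype=0: real symmetric problem
    while( eigsubspaceooccontinue(state) )
    {
        ae_int_t requesttype, requestsize;
        real_2d_array x, ax;
        eigsubspaceoocgetrequestinfo(state, requesttype, requestsize);
        eigsubspaceoocgetrequestdata(state, x);     // requesttype=0: matrix-matrix product
        ax.setlength(n, requestsize);
        for(ae_int_t i=0; i<n; i++)
            for(ae_int_t j=0; j<requestsize; j++)
            {
                double v = 0.0;
                for(ae_int_t p=0; p<n; p++)
                    v += a[i][p]*x[p][j];
                ax[i][j] = v;                       // user-side product Y = A*X
            }
        eigsubspaceoocsendresult(state, ax);
    }
    real_1d_array w;
    real_2d_array z;
    eigsubspacereport rep;
    eigsubspaceoocstop(state, w, z, rep);
    printf("%s\n", w.tostring(4).c_str());          // top eigenvalue, about 4.6180
    return 0;
}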
    - +
     
    /************************************************************************* -1-dimensional inverse FHT. - -Algorithm has O(N*logN) complexity for any N (composite or prime). +This function sets stopping critera for the solver: +* error in eigenvector/value allowed by solver +* maximum number of iterations to perform + +INPUT PARAMETERS: + State - solver structure + Eps - eps>=0, with non-zero value used to tell solver that + it can stop after all eigenvalues converged with + error roughly proportional to eps*MAX(LAMBDA_MAX), + where LAMBDA_MAX is a maximum eigenvalue. + Zero value means that no check for precision is + performed. + MaxIts - maxits>=0, with non-zero value used to tell solver + that it can stop after maxits steps (no matter how + precise current estimate is) + +NOTE: passing eps=0 and maxits=0 results in automatic selection of + moderate eps as stopping criteria (1.0E-6 in current implementation, + but it may change without notice). + +NOTE: very small values of eps are possible (say, 1.0E-12), although the + larger problem you solve (N and/or K), the harder it is to find + precise eigenvectors because rounding errors tend to accumulate. + +NOTE: passing non-zero eps results in some performance penalty, roughly + equal to 2N*(2K)^2 FLOPs per iteration. These additional computations + are required in order to estimate current error in eigenvalues via + Rayleigh-Ritz process. + Most of this additional time is spent in construction of ~2Kx2K + symmetric subproblem whose eigenvalues are checked with exact + eigensolver. + This additional time is negligible if you search for eigenvalues of + the large dense matrix, but may become noticeable on highly sparse + EVD problems, where cost of matrix-matrix product is low. + If you set eps to exactly zero, Rayleigh-Ritz phase is completely + turned off. -INPUT PARAMETERS - A - array[0..N-1] - complex array to be transformed - N - problem size + -- ALGLIB -- + Copyright 16.01.2017 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::eigsubspacesetcond( + eigsubspacestate state, + double eps, + ae_int_t maxits, + const xparams _params = alglib::xdefault); -OUTPUT PARAMETERS - A - inverse FHT of a input array, array[0..N-1] +
    + +
    +
    /************************************************************************* +This function sets warm-start mode of the solver: next call to the solver +will reuse previous subspace as warm-start point. It can significantly +speed-up convergence when you solve many similar eigenproblems. +INPUT PARAMETERS: + State - solver structure + UseWarmStart- either True or False -- ALGLIB -- - Copyright 29.05.2009 by Bochkanov Sergey + Copyright 12.11.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::fhtr1dinv(real_1d_array& a, ae_int_t n); +
    void alglib::eigsubspacesetwarmstart( + eigsubspacestate state, + bool usewarmstart, + const xparams _params = alglib::xdefault);
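A short editorial sketch of the warm-start scenario described above: two similar dense problems solved back to back, the second one reusing the subspace found for the first as its starting point. It is not part of the upstream manual; it assumes the linalg.h header and eigsubspacesolvedenses (documented below), and the matrices are the editor's own.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array a0 = "[[4,1,0],[1,3,0],[0,0,1]]";
    real_2d_array a1 = "[[4.1,1,0],[1,3,0],[0,0,1]]";   // slightly perturbed problem
    eigsubspacestate state;
    eigsubspacereport rep;
    real_1d_array w;
    real_2d_array z;
    eigsubspacecreate(3, 1, state);
    eigsubspacesetwarmstart(state, true);
    eigsubspacesolvedenses(state, a0, true, w, z, rep); // cold start
    eigsubspacesolvedenses(state, a1, true, w, z, rep); // warm start from previous subspace
    printf("%s\n", w.tostring(4).c_str());
    return 0;
}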
    - -
    - -filterema
    -filterlrma
    -filtersma
    - - - - - -
    filters_d_ema EMA(alpha) filter
    filters_d_lrma LRMA(k) filter
    filters_d_sma SMA(k) filter
    - +
     
    /************************************************************************* -Filters: exponential moving averages. +This function runs eigensolver for dense NxN symmetric matrix A, given by +upper or lower triangle. -This filter replaces array by results of EMA(alpha) filter. EMA(alpha) is -defined as filter which replaces X[] by S[]: - S[0] = X[0] - S[t] = alpha*X[t] + (1-alpha)*S[t-1] +This function can not process nonsymmetric matrices. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - X - array[N], array to process. It can be larger than N, - in this case only first N points are processed. - N - points count, N>=0 - alpha - 0<alpha<=1, smoothing parameter. + State - solver state + A - array[N,N], symmetric NxN matrix given by one of its + triangles + IsUpper - whether upper or lower triangle of A is given (the + other one is not referenced at all). OUTPUT PARAMETERS: - X - array, whose first N elements were processed - with EMA(alpha) + W - array[K], top K eigenvalues ordered by descending + of their absolute values + Z - array[N,K], matrix of eigenvectors found + Rep - report with additional parameters -NOTE 1: this function uses efficient in-place algorithm which does not - allocate temporary arrays. - -NOTE 2: this algorithm uses BOTH previous points and current one, i.e. - new value of X[i] depends on BOTH previous point and X[i] itself. - -NOTE 3: technical analytis users quite often work with EMA coefficient - expressed in DAYS instead of fractions. If you want to calculate - EMA(N), where N is a number of days, you can use alpha=2/(N+1). +NOTE: internally this function allocates a copy of NxN dense A. You should + take it into account when working with very large matrices occupying + almost all RAM. -- ALGLIB -- - Copyright 25.10.2011 by Bochkanov Sergey + Copyright 16.01.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::filterema(real_1d_array& x, double alpha); -void alglib::filterema(real_1d_array& x, ae_int_t n, double alpha); +
    void alglib::eigsubspacesolvedenses( + eigsubspacestate state, + real_2d_array a, + bool isupper, + real_1d_array& w, + real_2d_array& z, + eigsubspacereport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Filters: linear regression moving averages. - -This filter replaces array by results of LRMA(K) filter. +This function runs eigensolver for dense NxN symmetric matrix A, given by +upper or lower triangle. -LRMA(K) is defined as filter which, for each data point, builds linear -regression model using K prevous points (point itself is included in -these K points) and calculates value of this linear model at the point in -question. +This function can not process nonsymmetric matrices. INPUT PARAMETERS: - X - array[N], array to process. It can be larger than N, - in this case only first N points are processed. - N - points count, N>=0 - K - K>=1 (K can be larger than N , such cases will be - correctly handled). Window width. K=1 corresponds to - identity transformation (nothing changes). + State - solver state + A - NxN symmetric matrix given by one of its triangles + IsUpper - whether upper or lower triangle of A is given (the + other one is not referenced at all). OUTPUT PARAMETERS: - X - array, whose first N elements were processed with SMA(K) - -NOTE 1: this function uses efficient in-place algorithm which does not - allocate temporary arrays. - -NOTE 2: this algorithm makes only one pass through array and uses running - sum to speed-up calculation of the averages. Additional measures - are taken to ensure that running sum on a long sequence of zero - elements will be correctly reset to zero even in the presence of - round-off error. - -NOTE 3: this is unsymmetric version of the algorithm, which does NOT - averages points after the current one. Only X[i], X[i-1], ... are - used when calculating new value of X[i]. We should also note that - this algorithm uses BOTH previous points and current one, i.e. - new value of X[i] depends on BOTH previous point and X[i] itself. + W - array[K], top K eigenvalues ordered by descending + of their absolute values + Z - array[N,K], matrix of eigenvectors found + Rep - report with additional parameters -- ALGLIB -- - Copyright 25.10.2011 by Bochkanov Sergey + Copyright 16.01.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::filterlrma(real_1d_array& x, ae_int_t k); -void alglib::filterlrma(real_1d_array& x, ae_int_t n, ae_int_t k); +
    void alglib::eigsubspacesolvesparses( + eigsubspacestate state, + sparsematrix a, + bool isupper, + real_1d_array& w, + real_2d_array& z, + eigsubspacereport& rep, + const xparams _params = alglib::xdefault);
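For the sparse branch, the same band matrix as in the sparsesolvesks example earlier in this section can be fed to the subspace solver. The sketch below is the editor's own illustration, not part of the upstream manual; it assumes the linalg.h header and that the solver accepts a CRS-format matrix (sparsecreate and sparseconverttocrs are standard ALGLIB sparse-matrix calls documented elsewhere in the manual), and the expected top eigenvalue 3+sqrt(2), about 4.414, is the editor's own calculation.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Upper triangle of the SPD band matrix used in the sparsesolvesks example.
    //
    ae_int_t n = 4, k = 1;
    sparsematrix s;
    sparsecreate(n, n, s);
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 0, 1, 1.0);
    sparseset(s, 1, 1, 3.0);
    sparseset(s, 1, 2, 1.0);
    sparseset(s, 2, 2, 3.0);
    sparseset(s, 2, 3, 1.0);
    sparseset(s, 3, 3, 2.0);
    sparseconverttocrs(s);                              // computational functions expect CRS
    eigsubspacestate state;
    eigsubspacereport rep;
    real_1d_array w;
    real_2d_array z;
    eigsubspacecreate(n, k, state);
    eigsubspacesolvesparses(state, s, true, w, z, rep); // isupper=true: upper triangle given
    printf("%s\n", w.tostring(4).c_str());              // about 4.4142 (= 3+sqrt(2))
    return 0;
}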
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Filters: simple moving averages (unsymmetric). +Finding the eigenvalues and eigenvectors of a Hermitian matrix -This filter replaces array by results of SMA(K) filter. SMA(K) is defined -as filter which averages at most K previous points (previous - not points -AROUND central point) - or less, in case of the first K-1 points. +The algorithm finds eigen pairs of a Hermitian matrix by reducing it to +real tridiagonal form and using the QL/QR algorithm. -INPUT PARAMETERS: - X - array[N], array to process. It can be larger than N, - in this case only first N points are processed. - N - points count, N>=0 - K - K>=1 (K can be larger than N , such cases will be - correctly handled). Window width. K=1 corresponds to - identity transformation (nothing changes). + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -OUTPUT PARAMETERS: - X - array, whose first N elements were processed with SMA(K) +Input parameters: + A - Hermitian matrix which is given by its upper or lower + triangular part. + Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + IsUpper - storage format. + ZNeeded - flag controlling whether the eigenvectors are needed or + not. If ZNeeded is equal to: + * 0, the eigenvectors are not returned; + * 1, the eigenvectors are returned. -NOTE 1: this function uses efficient in-place algorithm which does not - allocate temporary arrays. +Output parameters: + D - eigenvalues in ascending order. + Array whose index ranges within [0..N-1]. + Z - if ZNeeded is equal to: + * 0, Z hasn't changed; + * 1, Z contains the eigenvectors. + Array whose indexes range within [0..N-1, 0..N-1]. + The eigenvectors are stored in the matrix columns. -NOTE 2: this algorithm makes only one pass through array and uses running - sum to speed-up calculation of the averages. Additional measures - are taken to ensure that running sum on a long sequence of zero - elements will be correctly reset to zero even in the presence of - round-off error. +Result: + True, if the algorithm has converged. + False, if the algorithm hasn't converged (rare case). -NOTE 3: this is unsymmetric version of the algorithm, which does NOT - averages points after the current one. Only X[i], X[i-1], ... are - used when calculating new value of X[i]. We should also note that - this algorithm uses BOTH previous points and current one, i.e. - new value of X[i] depends on BOTH previous point and X[i] itself. +Note: + eigenvectors of Hermitian matrix are defined up to multiplication by + a complex number L, such that |L|=1. -- ALGLIB -- - Copyright 25.10.2011 by Bochkanov Sergey + Copyright 2005, 23 March 2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::filtersma(real_1d_array& x, ae_int_t k); -void alglib::filtersma(real_1d_array& x, ae_int_t n, ae_int_t k); +
    bool alglib::hmatrixevd( + complex_2d_array a, + ae_int_t n, + ae_int_t zneeded, + bool isupper, + real_1d_array& d, + complex_2d_array& z, + const xparams _params = alglib::xdefault);
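A small editorial sketch of the call above (not part of the upstream manual), assuming the linalg.h header and the same string-constructor syntax used by the complex-array examples elsewhere in the manual; the 2x2 Hermitian test matrix, with eigenvalues 1 and 3, is the editor's own.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Hermitian matrix [[2, i], [-i, 2]] given by its upper triangle.
    //
    complex_2d_array a = "[[2,1i],[-1i,2]]";
    real_1d_array d;
    complex_2d_array z;
    bool ok = hmatrixevd(a, 2, 1, true, d, z);
    printf("%d\n", ok ? 1 : 0);                // 1: QL/QR iteration converged
    printf("%s\n", d.tostring(4).c_str());     // EXPECTED: [1.0000, 3.0000] (ascending)
    return 0;
}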
    -

    Examples:   [1]  

    - +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    -
    -using namespace alglib;
    -
    +
    /************************************************************************* +Subroutine for finding the eigenvalues and eigenvectors of a Hermitian +matrix with given indexes by using bisection and inverse iteration methods -int main(int argc, char **argv) -{ - // - // Here we demonstrate EMA(0.5) filtering for time series. - // - real_1d_array x = "[5,6,7,8]"; - - // - // Apply filter. - // We should get [5, 5.5, 6.25, 7.125] as result - // - filterema(x, 0.5); - printf("%s\n", x.tostring(4).c_str()); // EXPECTED: [5,5.5,6.25,7.125] - return 0; -} - - -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    +Input parameters:
    +    A       -   Hermitian matrix which is given  by  its  upper  or  lower
    +                triangular part.
    +                Array whose indexes range within [0..N-1, 0..N-1].
    +    N       -   size of matrix A.
    +    ZNeeded -   flag controlling whether the eigenvectors  are  needed  or
    +                not. If ZNeeded is equal to:
    +                 * 0, the eigenvectors are not returned;
    +                 * 1, the eigenvectors are returned.
    +    IsUpperA -  storage format of matrix A.
    +    I1, I2 -    index interval for searching (from I1 to I2).
    +                0 <= I1 <= I2 <= N-1.
     
    -using namespace alglib;
    +Output parameters:
    +    W       -   array of the eigenvalues found.
    +                Array whose index ranges within [0..I2-I1].
    +    Z       -   if ZNeeded is equal to:
    +                 * 0, Z hasn't changed;
    +                 * 1, Z contains eigenvectors.
    +                Array whose indexes range within [0..N-1, 0..I2-I1].
    +                In  that  case,  the eigenvectors are stored in the matrix
    +                columns.
     
    +Result:
    +    True, if successful. W contains the eigenvalues, Z contains the
    +    eigenvectors (if needed).
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // Here we demonstrate LRMA(3) filtering for time series.
    -    //
    -    real_1d_array x = "[7,8,8,9,12,12]";
    +    False, if the bisection method subroutine  wasn't  able  to  find  the
    +    eigenvalues  in  the  given  interval  or  if  the  inverse  iteration
    +    subroutine wasn't able to find  all  the  corresponding  eigenvectors.
    +    In that case, the eigenvalues and eigenvectors are not returned.
     
    -    //
    -    // Apply filter.
    -    // We should get [7.0000, 8.0000, 8.1667, 8.8333, 11.6667, 12.5000] as result
    -    //    
    -    filterlrma(x, 3);
    -    printf("%s\n", x.tostring(4).c_str()); // EXPECTED: [7.0000,8.0000,8.1667,8.8333,11.6667,12.5000]
    -    return 0;
    -}
    +Note:
+    eigenvectors of a Hermitian matrix are defined up to multiplication by
+    a complex number L such that |L|=1.
     
    +  -- ALGLIB --
    +     Copyright 07.01.2006, 24.03.2007 by Bochkanov Sergey.
    +*************************************************************************/
    +
    bool alglib::hmatrixevdi( + complex_2d_array a, + ae_int_t n, + ae_int_t zneeded, + bool isupper, + ae_int_t i1, + ae_int_t i2, + real_1d_array& w, + complex_2d_array& z, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    +
    /************************************************************************* +Subroutine for finding the eigenvalues (and eigenvectors) of a Hermitian +matrix in a given half-interval (A, B] by using a bisection and inverse +iteration -using namespace alglib; +Input parameters: + A - Hermitian matrix which is given by its upper or lower + triangular part. Array whose indexes range within + [0..N-1, 0..N-1]. + N - size of matrix A. + ZNeeded - flag controlling whether the eigenvectors are needed or + not. If ZNeeded is equal to: + * 0, the eigenvectors are not returned; + * 1, the eigenvectors are returned. + IsUpperA - storage format of matrix A. + B1, B2 - half-interval (B1, B2] to search eigenvalues in. +Output parameters: + M - number of eigenvalues found in a given half-interval, M>=0 + W - array of the eigenvalues found. + Array whose index ranges within [0..M-1]. + Z - if ZNeeded is equal to: + * 0, Z hasn't changed; + * 1, Z contains eigenvectors. + Array whose indexes range within [0..N-1, 0..M-1]. + The eigenvectors are stored in the matrix columns. -int main(int argc, char **argv) -{ - // - // Here we demonstrate SMA(k) filtering for time series. - // - real_1d_array x = "[5,6,7,8]"; +Result: + True, if successful. M contains the number of eigenvalues in the given + half-interval (could be equal to 0), W contains the eigenvalues, + Z contains the eigenvectors (if needed). - // - // Apply filter. - // We should get [5, 5.5, 6.5, 7.5] as result - // - filtersma(x, 2); - printf("%s\n", x.tostring(4).c_str()); // EXPECTED: [5,5.5,6.5,7.5] - return 0; -} + False, if the bisection method subroutine wasn't able to find the + eigenvalues in the given interval or if the inverse iteration + subroutine wasn't able to find all the corresponding eigenvectors. + In that case, the eigenvalues and eigenvectors are not returned, M is + equal to 0. +Note: + eigen vectors of Hermitian matrix are defined up to multiplication by + a complex number L, such as |L|=1. -
    -
    - -fresnelintegral
    - - -
    - + -- ALGLIB -- + Copyright 07.01.2006, 24.03.2007 by Bochkanov Sergey. +*************************************************************************/ +
    bool alglib::hmatrixevdr( + complex_2d_array a, + ae_int_t n, + ae_int_t zneeded, + bool isupper, + double b1, + double b2, + ae_int_t& m, + real_1d_array& w, + complex_2d_array& z, + const xparams _params = alglib::xdefault); + +
    +
     
    /************************************************************************* -Fresnel integral - -Evaluates the Fresnel integrals +Finding eigenvalues and eigenvectors of a general (unsymmetric) matrix - x - - - | | -C(x) = | cos(pi/2 t**2) dt, - | | - - - 0 + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - x - - - | | -S(x) = | sin(pi/2 t**2) dt. - | | - - - 0 +The algorithm finds eigenvalues and eigenvectors of a general matrix by +using the QR algorithm with multiple shifts. The algorithm can find +eigenvalues and both left and right eigenvectors. +The right eigenvector is a vector x such that A*x = w*x, and the left +eigenvector is a vector y such that y'*A = w*y' (here y' implies a complex +conjugate transposition of vector y). -The integrals are evaluated by a power series for x < 1. -For x >= 1 auxiliary functions f(x) and g(x) are employed -such that +Input parameters: + A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + VNeeded - flag controlling whether eigenvectors are needed or not. + If VNeeded is equal to: + * 0, eigenvectors are not returned; + * 1, right eigenvectors are returned; + * 2, left eigenvectors are returned; + * 3, both left and right eigenvectors are returned. -C(x) = 0.5 + f(x) sin( pi/2 x**2 ) - g(x) cos( pi/2 x**2 ) -S(x) = 0.5 - f(x) cos( pi/2 x**2 ) - g(x) sin( pi/2 x**2 ) +Output parameters: + WR - real parts of eigenvalues. + Array whose index ranges within [0..N-1]. + WR - imaginary parts of eigenvalues. + Array whose index ranges within [0..N-1]. + VL, VR - arrays of left and right eigenvectors (if they are needed). + If WI[i]=0, the respective eigenvalue is a real number, + and it corresponds to the column number I of matrices VL/VR. + If WI[i]>0, we have a pair of complex conjugate numbers with + positive and negative imaginary parts: + the first eigenvalue WR[i] + sqrt(-1)*WI[i]; + the second eigenvalue WR[i+1] + sqrt(-1)*WI[i+1]; + WI[i]>0 + WI[i+1] = -WI[i] < 0 + In that case, the eigenvector corresponding to the first + eigenvalue is located in i and i+1 columns of matrices + VL/VR (the column number i contains the real part, and the + column number i+1 contains the imaginary part), and the vector + corresponding to the second eigenvalue is a complex conjugate to + the first vector. + Arrays whose indexes range within [0..N-1, 0..N-1]. +Result: + True, if the algorithm has converged. + False, if the algorithm has not converged. +Note 1: + Some users may ask the following question: what if WI[N-1]>0? + WI[N] must contain an eigenvalue which is complex conjugate to the + N-th eigenvalue, but the array has only size N? + The answer is as follows: such a situation cannot occur because the + algorithm finds a pairs of eigenvalues, therefore, if WI[i]>0, I is + strictly less than N-1. 
-ACCURACY: +Note 2: + The algorithm performance depends on the value of the internal parameter + NS of the InternalSchurDecomposition subroutine which defines the number + of shifts in the QR algorithm (similarly to the block width in block-matrix + algorithms of linear algebra). If you require maximum performance + on your machine, it is recommended to adjust this parameter manually. - Relative error. -Arithmetic function domain # trials peak rms - IEEE S(x) 0, 10 10000 2.0e-15 3.2e-16 - IEEE C(x) 0, 10 10000 1.8e-15 3.3e-16 +See also the InternalTREVC subroutine. -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier +The algorithm is based on the LAPACK 3.0 library. *************************************************************************/ -
    void alglib::fresnelintegral(double x, double& c, double& s); +
    bool alglib::rmatrixevd( + real_2d_array a, + ae_int_t n, + ae_int_t vneeded, + real_1d_array& wr, + real_1d_array& wi, + real_2d_array& vl, + real_2d_array& vr, + const xparams _params = alglib::xdefault);
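A small editorial sketch of the call above (not part of the upstream manual), assuming the linalg.h header; the nonsymmetric 2x2 test matrix, with real spectrum {-2, -1}, is the editor's own.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Nonsymmetric matrix with eigenvalues -1 and -2; VNeeded=1 requests
    // right eigenvectors only.
    //
    real_2d_array a = "[[0,1],[-2,-3]]";
    real_1d_array wr, wi;
    real_2d_array vl, vr;
    bool ok = rmatrixevd(a, 2, 1, wr, wi, vl, vr);
    printf("%d\n", ok ? 1 : 0);                // 1 if the QR iteration converged
    printf("%s\n", wr.tostring(4).c_str());    // -1 and -2 (ordering not guaranteed)
    printf("%s\n", wi.tostring(4).c_str());    // EXPECTED: [0.0000, 0.0000]
    return 0;
}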
    - -
    - -gammafunction
    -lngamma
    - - -
    - +
     
    /************************************************************************* -Gamma function - -Input parameters: - X - argument - -Domain: - 0 < X < 171.6 - -170 < X < 0, X is not an integer. - -Relative error: - arithmetic domain # trials peak rms - IEEE -170,-33 20000 2.3e-15 3.3e-16 - IEEE -33, 33 20000 9.4e-16 2.2e-16 - IEEE 33, 171.6 20000 2.3e-15 3.2e-16 +Finding the eigenvalues and eigenvectors of a symmetric matrix -Cephes Math Library Release 2.8: June, 2000 -Original copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier -Translated to AlgoPascal by Bochkanov Sergey (2005, 2006, 2007). -*************************************************************************/ -
    double alglib::gammafunction(double x); +The algorithm finds eigen pairs of a symmetric matrix by reducing it to +tridiagonal form and using the QL/QR algorithm. -
    - -
    -
    /************************************************************************* -Natural logarithm of gamma function + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. Input parameters: - X - argument - -Result: - logarithm of the absolute value of the Gamma(X). + A - symmetric matrix which is given by its upper or lower + triangular part. + Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + ZNeeded - flag controlling whether the eigenvectors are needed or not. + If ZNeeded is equal to: + * 0, the eigenvectors are not returned; + * 1, the eigenvectors are returned. + IsUpper - storage format. Output parameters: - SgnGam - sign(Gamma(X)) - -Domain: - 0 < X < 2.55e305 - -2.55e305 < X < 0, X is not an integer. - -ACCURACY: -arithmetic domain # trials peak rms - IEEE 0, 3 28000 5.4e-16 1.1e-16 - IEEE 2.718, 2.556e305 40000 3.5e-16 8.3e-17 -The error criterion was relative when the function magnitude -was greater than one but absolute when it was less than one. + D - eigenvalues in ascending order. + Array whose index ranges within [0..N-1]. + Z - if ZNeeded is equal to: + * 0, Z hasn't changed; + * 1, Z contains the eigenvectors. + Array whose indexes range within [0..N-1, 0..N-1]. + The eigenvectors are stored in the matrix columns. -The following test used the relative error criterion, though -at certain points the relative error could be much higher than -indicated. - IEEE -200, -4 10000 4.8e-16 1.3e-16 +Result: + True, if the algorithm has converged. + False, if the algorithm hasn't converged (rare case). -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier -Translated to AlgoPascal by Bochkanov Sergey (2005, 2006, 2007). + -- ALGLIB -- + Copyright 2005-2008 by Bochkanov Sergey *************************************************************************/ -
    double alglib::lngamma(double x, double& sgngam); +
    bool alglib::smatrixevd( + real_2d_array a, + ae_int_t n, + ae_int_t zneeded, + bool isupper, + real_1d_array& d, + real_2d_array& z, + const xparams _params = alglib::xdefault);
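To complement the description above, here is a small sketch of smatrixevd() written in the style of the other examples in this manual. It assumes the linalg.h header (not shown in this section); the 2x2 matrix is illustrative, with known eigenvalues 1 and 3.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // 2x2 symmetric matrix; isupper=false means the lower triangular
    // part is used.
    //
    real_2d_array a = "[[2,1],[1,2]]";
    real_1d_array d;
    real_2d_array z;

    //
    // ZNeeded=1 requests eigenvectors. Eigenvalues are returned in
    // ascending order, so d should be approximately [1,3] here.
    //
    if( !smatrixevd(a, 2, 1, false, d, z) )
    {
        printf("smatrixevd did not converge\n");
        return 1;
    }
    printf("%s\n", d.tostring(3).c_str());
    return 0;
}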
    - - - +
     
    /************************************************************************* -Returns Gauss and Gauss-Kronrod nodes/weights for Gauss-Jacobi -quadrature on [-1,1] with weight function +Subroutine for finding the eigenvalues and eigenvectors of a symmetric +matrix with given indexes by using bisection and inverse iteration methods. - W(x)=Power(1-x,Alpha)*Power(1+x,Beta). +Input parameters: + A - symmetric matrix which is given by its upper or lower + triangular part. Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + ZNeeded - flag controlling whether the eigenvectors are needed or not. + If ZNeeded is equal to: + * 0, the eigenvectors are not returned; + * 1, the eigenvectors are returned. + IsUpperA - storage format of matrix A. + I1, I2 - index interval for searching (from I1 to I2). + 0 <= I1 <= I2 <= N-1. -INPUT PARAMETERS: - N - number of Kronrod nodes, must be odd number, >=3. - Alpha - power-law coefficient, Alpha>-1 - Beta - power-law coefficient, Beta>-1 +Output parameters: + W - array of the eigenvalues found. + Array whose index ranges within [0..I2-I1]. + Z - if ZNeeded is equal to: + * 0, Z hasn't changed; + * 1, Z contains eigenvectors. + Array whose indexes range within [0..N-1, 0..I2-I1]. + In that case, the eigenvectors are stored in the matrix columns. -OUTPUT PARAMETERS: - Info - error code: - * -5 no real and positive Gauss-Kronrod formula can - be created for such a weight function with a - given number of nodes. - * -4 an error was detected when calculating - weights/nodes. Alpha or Beta are too close - to -1 to obtain weights/nodes with high enough - accuracy, or, may be, N is too large. Try to - use multiple precision version. - * -3 internal eigenproblem solver hasn't converged - * -1 incorrect N was passed - * +1 OK - * +2 OK, but quadrature rule have exterior nodes, - x[0]<-1 or x[n-1]>+1 - X - array[0..N-1] - array of quadrature nodes, ordered in - ascending order. - WKronrod - array[0..N-1] - Kronrod weights - WGauss - array[0..N-1] - Gauss weights (interleaved with zeros - corresponding to extended Kronrod nodes). +Result: + True, if successful. W contains the eigenvalues, Z contains the + eigenvectors (if needed). + False, if the bisection method subroutine wasn't able to find the + eigenvalues in the given interval or if the inverse iteration subroutine + wasn't able to find all the corresponding eigenvectors. + In that case, the eigenvalues and eigenvectors are not returned. -- ALGLIB -- - Copyright 12.05.2009 by Bochkanov Sergey + Copyright 07.01.2006 by Bochkanov Sergey *************************************************************************/ -
    void alglib::gkqgenerategaussjacobi( +
    bool alglib::smatrixevdi( + real_2d_array a, ae_int_t n, - double alpha, - double beta, - ae_int_t& info, - real_1d_array& x, - real_1d_array& wkronrod, - real_1d_array& wgauss); + ae_int_t zneeded, + bool isupper, + ae_int_t i1, + ae_int_t i2, + real_1d_array& w, + real_2d_array& z, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Returns Gauss and Gauss-Kronrod nodes/weights for Gauss-Legendre -quadrature with N points. +Subroutine for finding the eigenvalues (and eigenvectors) of a symmetric +matrix in a given half open interval (A, B] by using a bisection and +inverse iteration -GKQLegendreCalc (calculation) or GKQLegendreTbl (precomputed table) is -used depending on machine precision and number of nodes. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -INPUT PARAMETERS: - N - number of Kronrod nodes, must be odd number, >=3. +Input parameters: + A - symmetric matrix which is given by its upper or lower + triangular part. Array [0..N-1, 0..N-1]. + N - size of matrix A. + ZNeeded - flag controlling whether the eigenvectors are needed or not. + If ZNeeded is equal to: + * 0, the eigenvectors are not returned; + * 1, the eigenvectors are returned. + IsUpperA - storage format of matrix A. + B1, B2 - half open interval (B1, B2] to search eigenvalues in. -OUTPUT PARAMETERS: - Info - error code: - * -4 an error was detected when calculating - weights/nodes. N is too large to obtain - weights/nodes with high enough accuracy. - Try to use multiple precision version. - * -3 internal eigenproblem solver hasn't converged - * -1 incorrect N was passed - * +1 OK - X - array[0..N-1] - array of quadrature nodes, ordered in - ascending order. - WKronrod - array[0..N-1] - Kronrod weights - WGauss - array[0..N-1] - Gauss weights (interleaved with zeros - corresponding to extended Kronrod nodes). +Output parameters: + M - number of eigenvalues found in a given half-interval (M>=0). + W - array of the eigenvalues found. + Array whose index ranges within [0..M-1]. + Z - if ZNeeded is equal to: + * 0, Z hasn't changed; + * 1, Z contains eigenvectors. + Array whose indexes range within [0..N-1, 0..M-1]. + The eigenvectors are stored in the matrix columns. + +Result: + True, if successful. M contains the number of eigenvalues in the given + half-interval (could be equal to 0), W contains the eigenvalues, + Z contains the eigenvectors (if needed). + False, if the bisection method subroutine wasn't able to find the + eigenvalues in the given interval or if the inverse iteration subroutine + wasn't able to find all the corresponding eigenvectors. + In that case, the eigenvalues and eigenvectors are not returned, + M is equal to 0. -- ALGLIB -- - Copyright 12.05.2009 by Bochkanov Sergey + Copyright 07.01.2006 by Bochkanov Sergey *************************************************************************/ -
    void alglib::gkqgenerategausslegendre( +
    bool alglib::smatrixevdr( + real_2d_array a, ae_int_t n, - ae_int_t& info, - real_1d_array& x, - real_1d_array& wkronrod, - real_1d_array& wgauss); + ae_int_t zneeded, + bool isupper, + double b1, + double b2, + ae_int_t& m, + real_1d_array& w, + real_2d_array& z, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Computation of nodes and weights of a Gauss-Kronrod quadrature formula - -The algorithm generates the N-point Gauss-Kronrod quadrature formula with -weight function given by coefficients alpha and beta of a recurrence -relation which generates a system of orthogonal polynomials: +Finding the eigenvalues and eigenvectors of a tridiagonal symmetric matrix - P-1(x) = 0 - P0(x) = 1 - Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) +The algorithm finds the eigen pairs of a tridiagonal symmetric matrix by +using an QL/QR algorithm with implicit shifts. -and zero moment Mu0 - - Mu0 = integral(W(x)dx,a,b) + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. +Input parameters: + D - the main diagonal of a tridiagonal matrix. + Array whose index ranges within [0..N-1]. + E - the secondary diagonal of a tridiagonal matrix. + Array whose index ranges within [0..N-2]. + N - size of matrix A. + ZNeeded - flag controlling whether the eigenvectors are needed or not. + If ZNeeded is equal to: + * 0, the eigenvectors are not needed; + * 1, the eigenvectors of a tridiagonal matrix + are multiplied by the square matrix Z. It is used if the + tridiagonal matrix is obtained by the similarity + transformation of a symmetric matrix; + * 2, the eigenvectors of a tridiagonal matrix replace the + square matrix Z; + * 3, matrix Z contains the first row of the eigenvectors + matrix. + Z - if ZNeeded=1, Z contains the square matrix by which the + eigenvectors are multiplied. + Array whose indexes range within [0..N-1, 0..N-1]. -INPUT PARAMETERS: - Alpha – alpha coefficients, array[0..floor(3*K/2)]. - Beta – beta coefficients, array[0..ceil(3*K/2)]. - Beta[0] is not used and may be arbitrary. - Beta[I]>0. - Mu0 – zeroth moment of the weight function. - N – number of nodes of the Gauss-Kronrod quadrature formula, - N >= 3, - N = 2*K+1. +Output parameters: + D - eigenvalues in ascending order. + Array whose index ranges within [0..N-1]. + Z - if ZNeeded is equal to: + * 0, Z hasn't changed; + * 1, Z contains the product of a given matrix (from the left) + and the eigenvectors matrix (from the right); + * 2, Z contains the eigenvectors. + * 3, Z contains the first row of the eigenvectors matrix. + If ZNeeded<3, Z is the array whose indexes range within [0..N-1, 0..N-1]. + In that case, the eigenvectors are stored in the matrix columns. + If ZNeeded=3, Z is the array whose indexes range within [0..0, 0..N-1]. -OUTPUT PARAMETERS: - Info - error code: - * -5 no real and positive Gauss-Kronrod formula can - be created for such a weight function with a - given number of nodes. - * -4 N is too large, task may be ill conditioned - - x[i]=x[i+1] found. - * -3 internal eigenproblem solver hasn't converged - * -2 Beta[i]<=0 - * -1 incorrect N was passed - * +1 OK - X - array[0..N-1] - array of quadrature nodes, - in ascending order. 
- WKronrod - array[0..N-1] - Kronrod weights - WGauss - array[0..N-1] - Gauss weights (interleaved with zeros - corresponding to extended Kronrod nodes). +Result: + True, if the algorithm has converged. + False, if the algorithm hasn't converged. - -- ALGLIB -- - Copyright 08.05.2009 by Bochkanov Sergey + -- LAPACK routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + September 30, 1994 *************************************************************************/ -
    void alglib::gkqgeneraterec( - real_1d_array alpha, - real_1d_array beta, - double mu0, +
    bool alglib::smatrixtdevd( + real_1d_array& d, + real_1d_array e, ae_int_t n, - ae_int_t& info, - real_1d_array& x, - real_1d_array& wkronrod, - real_1d_array& wgauss); + ae_int_t zneeded, + real_2d_array& z, + const xparams _params = alglib::xdefault);
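A short sketch of smatrixtdevd() for the eigenvalues-only case (ZNeeded=0) follows. It assumes the linalg.h header and that an empty Z may be passed when ZNeeded=0, since the documentation above states Z is left unchanged in that case; the diagonals and quoted eigenvalues are illustrative.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Tridiagonal matrix with main diagonal [2,2,2] and
    // secondary diagonal [1,1].
    //
    real_1d_array d = "[2,2,2]";
    real_1d_array e = "[1,1]";
    real_2d_array z;    // assumed unused because zneeded=0

    //
    // ZNeeded=0: eigenvalues only. On success d is overwritten by the
    // eigenvalues in ascending order, approximately [0.586, 2.000, 3.414]
    // (that is, 2-sqrt(2), 2, 2+sqrt(2)) for this matrix.
    //
    if( !smatrixtdevd(d, e, 3, 0, z) )
    {
        printf("smatrixtdevd did not converge\n");
        return 1;
    }
    printf("%s\n", d.tostring(3).c_str());
    return 0;
}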
    - +
     
    /************************************************************************* -Returns Gauss and Gauss-Kronrod nodes for quadrature with N points. +Subroutine for finding tridiagonal matrix eigenvalues/vectors with given +indexes (in ascending order) by using the bisection and inverse iteraion. -Reduction to tridiagonal eigenproblem is used. +Input parameters: + D - the main diagonal of a tridiagonal matrix. + Array whose index ranges within [0..N-1]. + E - the secondary diagonal of a tridiagonal matrix. + Array whose index ranges within [0..N-2]. + N - size of matrix. N>=0. + ZNeeded - flag controlling whether the eigenvectors are needed or not. + If ZNeeded is equal to: + * 0, the eigenvectors are not needed; + * 1, the eigenvectors of a tridiagonal matrix are multiplied + by the square matrix Z. It is used if the + tridiagonal matrix is obtained by the similarity transformation + of a symmetric matrix. + * 2, the eigenvectors of a tridiagonal matrix replace + matrix Z. + I1, I2 - index interval for searching (from I1 to I2). + 0 <= I1 <= I2 <= N-1. + Z - if ZNeeded is equal to: + * 0, Z isn't used and remains unchanged; + * 1, Z contains the square matrix (array whose indexes range within [0..N-1, 0..N-1]) + which reduces the given symmetric matrix to tridiagonal form; + * 2, Z isn't used (but changed on the exit). -INPUT PARAMETERS: - N - number of Kronrod nodes, must be odd number, >=3. +Output parameters: + D - array of the eigenvalues found. + Array whose index ranges within [0..I2-I1]. + Z - if ZNeeded is equal to: + * 0, doesn't contain any information; + * 1, contains the product of a given NxN matrix Z (from the left) and + Nx(I2-I1) matrix of the eigenvectors found (from the right). + Array whose indexes range within [0..N-1, 0..I2-I1]. + * 2, contains the matrix of the eigenvalues found. + Array whose indexes range within [0..N-1, 0..I2-I1]. -OUTPUT PARAMETERS: - Info - error code: - * -4 an error was detected when calculating - weights/nodes. N is too large to obtain - weights/nodes with high enough accuracy. - Try to use multiple precision version. - * -3 internal eigenproblem solver hasn't converged - * -1 incorrect N was passed - * +1 OK - X - array[0..N-1] - array of quadrature nodes, ordered in - ascending order. - WKronrod - array[0..N-1] - Kronrod weights - WGauss - array[0..N-1] - Gauss weights (interleaved with zeros - corresponding to extended Kronrod nodes). + +Result: + + True, if successful. In that case, D contains the eigenvalues, + Z contains the eigenvectors (if needed). + It should be noted that the subroutine changes the size of arrays D and Z. + + False, if the bisection method subroutine wasn't able to find the eigenvalues + in the given interval or if the inverse iteration subroutine wasn't able + to find all the corresponding eigenvectors. In that case, the eigenvalues + and eigenvectors are not returned. -- ALGLIB -- - Copyright 12.05.2009 by Bochkanov Sergey + Copyright 25.12.2005 by Bochkanov Sergey *************************************************************************/ -
    void alglib::gkqlegendrecalc( +
    bool alglib::smatrixtdevdi( + real_1d_array& d, + real_1d_array e, ae_int_t n, - ae_int_t& info, - real_1d_array& x, - real_1d_array& wkronrod, - real_1d_array& wgauss); + ae_int_t zneeded, + ae_int_t i1, + ae_int_t i2, + real_2d_array& z, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Returns Gauss and Gauss-Kronrod nodes for quadrature with N points using -pre-calculated table. Nodes/weights were computed with accuracy up to -1.0E-32 (if MPFR version of ALGLIB is used). In standard double precision -accuracy reduces to something about 2.0E-16 (depending on your compiler's -handling of long floating point constants). +Subroutine for finding the tridiagonal matrix eigenvalues/vectors in a +given half-interval (A, B] by using bisection and inverse iteration. -INPUT PARAMETERS: - N - number of Kronrod nodes. - N can be 15, 21, 31, 41, 51, 61. +Input parameters: + D - the main diagonal of a tridiagonal matrix. + Array whose index ranges within [0..N-1]. + E - the secondary diagonal of a tridiagonal matrix. + Array whose index ranges within [0..N-2]. + N - size of matrix, N>=0. + ZNeeded - flag controlling whether the eigenvectors are needed or not. + If ZNeeded is equal to: + * 0, the eigenvectors are not needed; + * 1, the eigenvectors of a tridiagonal matrix are multiplied + by the square matrix Z. It is used if the tridiagonal + matrix is obtained by the similarity transformation + of a symmetric matrix. + * 2, the eigenvectors of a tridiagonal matrix replace matrix Z. + A, B - half-interval (A, B] to search eigenvalues in. + Z - if ZNeeded is equal to: + * 0, Z isn't used and remains unchanged; + * 1, Z contains the square matrix (array whose indexes range + within [0..N-1, 0..N-1]) which reduces the given symmetric + matrix to tridiagonal form; + * 2, Z isn't used (but changed on the exit). -OUTPUT PARAMETERS: - X - array[0..N-1] - array of quadrature nodes, ordered in - ascending order. - WKronrod - array[0..N-1] - Kronrod weights - WGauss - array[0..N-1] - Gauss weights (interleaved with zeros - corresponding to extended Kronrod nodes). +Output parameters: + D - array of the eigenvalues found. + Array whose index ranges within [0..M-1]. + M - number of eigenvalues found in the given half-interval (M>=0). + Z - if ZNeeded is equal to: + * 0, doesn't contain any information; + * 1, contains the product of a given NxN matrix Z (from the + left) and NxM matrix of the eigenvectors found (from the + right). Array whose indexes range within [0..N-1, 0..M-1]. + * 2, contains the matrix of the eigenvectors found. + Array whose indexes range within [0..N-1, 0..M-1]. +Result: + + True, if successful. In that case, M contains the number of eigenvalues + in the given half-interval (could be equal to 0), D contains the eigenvalues, + Z contains the eigenvectors (if needed). + It should be noted that the subroutine changes the size of arrays D and Z. + + False, if the bisection method subroutine wasn't able to find the + eigenvalues in the given interval or if the inverse iteration subroutine + wasn't able to find all the corresponding eigenvectors. In that case, + the eigenvalues and eigenvectors are not returned, M is equal to 0. -- ALGLIB -- - Copyright 12.05.2009 by Bochkanov Sergey + Copyright 31.03.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::gkqlegendretbl( +
    bool alglib::smatrixtdevdr( + real_1d_array& d, + real_1d_array e, ae_int_t n, - real_1d_array& x, - real_1d_array& wkronrod, - real_1d_array& wgauss, - double& eps); + ae_int_t zneeded, + double a, + double b, + ae_int_t& m, + real_2d_array& z, + const xparams _params = alglib::xdefault);
    - + - +
     
    /************************************************************************* -Returns nodes/weights for Gauss-Hermite quadrature on (-inf,+inf) with -weight function W(x)=Exp(-x*x) +Exponential integral Ei(x) -INPUT PARAMETERS: - N - number of nodes, >=1 + x + - t + | | e + Ei(x) = -|- --- dt . + | | t + - + -inf -OUTPUT PARAMETERS: - Info - error code: - * -4 an error was detected when calculating - weights/nodes. May be, N is too large. Try to - use multiple precision version. - * -3 internal eigenproblem solver hasn't converged - * -1 incorrect N/Alpha was passed - * +1 OK - X - array[0..N-1] - array of quadrature nodes, - in ascending order. - W - array[0..N-1] - array of quadrature weights. +Not defined for x <= 0. +See also expn.c. - -- ALGLIB -- - Copyright 12.05.2009 by Bochkanov Sergey + +ACCURACY: + + Relative error: +arithmetic domain # trials peak rms + IEEE 0,100 50000 8.6e-16 1.3e-16 + +Cephes Math Library Release 2.8: May, 1999 +Copyright 1999 by Stephen L. Moshier *************************************************************************/ -
    void alglib::gqgenerategausshermite( - ae_int_t n, - ae_int_t& info, - real_1d_array& x, - real_1d_array& w); +
    double alglib::exponentialintegralei( + double x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Returns nodes/weights for Gauss-Jacobi quadrature on [-1,1] with weight -function W(x)=Power(1-x,Alpha)*Power(1+x,Beta). +Exponential integral En(x) -INPUT PARAMETERS: - N - number of nodes, >=1 - Alpha - power-law coefficient, Alpha>-1 - Beta - power-law coefficient, Beta>-1 +Evaluates the exponential integral -OUTPUT PARAMETERS: - Info - error code: - * -4 an error was detected when calculating - weights/nodes. Alpha or Beta are too close - to -1 to obtain weights/nodes with high enough - accuracy, or, may be, N is too large. Try to - use multiple precision version. - * -3 internal eigenproblem solver hasn't converged - * -1 incorrect N/Alpha/Beta was passed - * +1 OK - X - array[0..N-1] - array of quadrature nodes, - in ascending order. - W - array[0..N-1] - array of quadrature weights. + inf. + - + | | -xt + | e + E (x) = | ---- dt. + n | n + | | t + - + 1 - -- ALGLIB -- - Copyright 12.05.2009 by Bochkanov Sergey +Both n and x must be nonnegative. + +The routine employs either a power series, a continued +fraction, or an asymptotic formula depending on the +relative values of n and x. + +ACCURACY: + + Relative error: +arithmetic domain # trials peak rms + IEEE 0, 30 10000 1.7e-15 3.6e-16 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1985, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::gqgenerategaussjacobi( +
    double alglib::exponentialintegralen( + double x, ae_int_t n, - double alpha, - double beta, - ae_int_t& info, - real_1d_array& x, - real_1d_array& w); + const xparams _params = alglib::xdefault);
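The two exponential-integral routines above are straightforward to call. The sketch below is supplementary (not an upstream example); it assumes the specialfunctions.h header, which is not shown in this section, and compares the results against the classical reference values Ei(1) = 1.8951... and E1(1) = 0.2194...

#include "stdafx.h"
#include <stdio.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Ei(1) is approximately 1.8951
    //
    printf("Ei(1)  = %.4f\n", exponentialintegralei(1.0));

    //
    // E_1(1) is approximately 0.2194
    // (note the argument order in the declaration above: x first, then n)
    //
    printf("E_1(1) = %.4f\n", exponentialintegralen(1.0, 1));
    return 0;
}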
    - + +
    + +fcdistribution
    +fdistribution
    +invfdistribution
    + + +
    +
     
    /************************************************************************* -Returns nodes/weights for Gauss-Laguerre quadrature on [0,+inf) with -weight function W(x)=Power(x,Alpha)*Exp(-x) +Complemented F distribution -INPUT PARAMETERS: - N - number of nodes, >=1 - Alpha - power-law coefficient, Alpha>-1 +Returns the area from x to infinity under the F density +function (also known as Snedcor's density or the +variance ratio density). -OUTPUT PARAMETERS: - Info - error code: - * -4 an error was detected when calculating - weights/nodes. Alpha is too close to -1 to - obtain weights/nodes with high enough accuracy - or, may be, N is too large. Try to use - multiple precision version. - * -3 internal eigenproblem solver hasn't converged - * -1 incorrect N/Alpha was passed - * +1 OK - X - array[0..N-1] - array of quadrature nodes, - in ascending order. - W - array[0..N-1] - array of quadrature weights. + inf. + - + 1 | | a-1 b-1 +1-P(x) = ------ | t (1-t) dt + B(a,b) | | + - + x - -- ALGLIB -- - Copyright 12.05.2009 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::gqgenerategausslaguerre( - ae_int_t n, - double alpha, - ae_int_t& info, - real_1d_array& x, - real_1d_array& w); -
    - -
    -
    /************************************************************************* -Returns nodes/weights for Gauss-Legendre quadrature on [-1,1] with N -nodes. +The incomplete beta integral is used, according to the +formula -INPUT PARAMETERS: - N - number of nodes, >=1 +P(x) = incbet( df2/2, df1/2, (df2/(df2 + df1*x) ). -OUTPUT PARAMETERS: - Info - error code: - * -4 an error was detected when calculating - weights/nodes. N is too large to obtain - weights/nodes with high enough accuracy. - Try to use multiple precision version. - * -3 internal eigenproblem solver hasn't converged - * -1 incorrect N was passed - * +1 OK - X - array[0..N-1] - array of quadrature nodes, - in ascending order. - W - array[0..N-1] - array of quadrature weights. +ACCURACY: - -- ALGLIB -- - Copyright 12.05.2009 by Bochkanov Sergey +Tested at random points (a,b,x) in the indicated intervals. + x a,b Relative error: +arithmetic domain domain # trials peak rms + IEEE 0,1 1,100 100000 3.7e-14 5.9e-16 + IEEE 1,5 1,100 100000 8.0e-15 1.6e-15 + IEEE 0,1 1,10000 100000 1.8e-11 3.5e-13 + IEEE 1,5 1,10000 100000 2.0e-11 3.0e-12 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::gqgenerategausslegendre( - ae_int_t n, - ae_int_t& info, - real_1d_array& x, - real_1d_array& w); +
    double alglib::fcdistribution( + ae_int_t a, + ae_int_t b, + double x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Computation of nodes and weights for a Gauss-Lobatto quadrature formula +F distribution -The algorithm generates the N-point Gauss-Lobatto quadrature formula with -weight function given by coefficients alpha and beta of a recurrence which -generates a system of orthogonal polynomials. +Returns the area from zero to x under the F density +function (also known as Snedcor's density or the +variance ratio density). This is the density +of x = (u1/df1)/(u2/df2), where u1 and u2 are random +variables having Chi square distributions with df1 +and df2 degrees of freedom, respectively. +The incomplete beta integral is used, according to the +formula -P-1(x) = 0 -P0(x) = 1 -Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) +P(x) = incbet( df1/2, df2/2, (df1*x/(df2 + df1*x) ). -and zeroth moment Mu0 -Mu0 = integral(W(x)dx,a,b) +The arguments a and b are greater than zero, and x is +nonnegative. -INPUT PARAMETERS: - Alpha – array[0..N-2], alpha coefficients - Beta – array[0..N-2], beta coefficients. - Zero-indexed element is not used, may be arbitrary. - Beta[I]>0 - Mu0 – zeroth moment of the weighting function. - A – left boundary of the integration interval. - B – right boundary of the integration interval. - N – number of nodes of the quadrature formula, N>=3 - (including the left and right boundary nodes). +ACCURACY: -OUTPUT PARAMETERS: - Info - error code: - * -3 internal eigenproblem solver hasn't converged - * -2 Beta[i]<=0 - * -1 incorrect N was passed - * 1 OK - X - array[0..N-1] - array of quadrature nodes, - in ascending order. - W - array[0..N-1] - array of quadrature weights. +Tested at random points (a,b,x). - -- ALGLIB -- - Copyright 2005-2009 by Bochkanov Sergey + x a,b Relative error: +arithmetic domain domain # trials peak rms + IEEE 0,1 0,100 100000 9.8e-15 1.7e-15 + IEEE 1,5 0,100 100000 6.5e-15 3.5e-16 + IEEE 0,1 1,10000 100000 2.2e-11 3.3e-12 + IEEE 1,5 1,10000 100000 1.1e-11 1.7e-13 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::gqgenerategausslobattorec( - real_1d_array alpha, - real_1d_array beta, - double mu0, - double a, - double b, - ae_int_t n, - ae_int_t& info, - real_1d_array& x, - real_1d_array& w); +
    double alglib::fdistribution( + ae_int_t a, + ae_int_t b, + double x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Computation of nodes and weights for a Gauss-Radau quadrature formula +Inverse of complemented F distribution -The algorithm generates the N-point Gauss-Radau quadrature formula with -weight function given by the coefficients alpha and beta of a recurrence -which generates a system of orthogonal polynomials. +Finds the F density argument x such that the integral +from x to infinity of the F density is equal to the +given probability p. -P-1(x) = 0 -P0(x) = 1 -Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) +This is accomplished using the inverse beta integral +function and the relations -and zeroth moment Mu0 + z = incbi( df2/2, df1/2, p ) + x = df2 (1-z) / (df1 z). -Mu0 = integral(W(x)dx,a,b) +Note: the following relations hold for the inverse of +the uncomplemented F distribution: -INPUT PARAMETERS: - Alpha – array[0..N-2], alpha coefficients. - Beta – array[0..N-1], beta coefficients - Zero-indexed element is not used. - Beta[I]>0 - Mu0 – zeroth moment of the weighting function. - A – left boundary of the integration interval. - N – number of nodes of the quadrature formula, N>=2 - (including the left boundary node). + z = incbi( df1/2, df2/2, p ) + x = df2 z / (df1 (1-z)). -OUTPUT PARAMETERS: - Info - error code: - * -3 internal eigenproblem solver hasn't converged - * -2 Beta[i]<=0 - * -1 incorrect N was passed - * 1 OK - X - array[0..N-1] - array of quadrature nodes, - in ascending order. - W - array[0..N-1] - array of quadrature weights. +ACCURACY: +Tested at random points (a,b,p). - -- ALGLIB -- - Copyright 2005-2009 by Bochkanov Sergey + a,b Relative error: +arithmetic domain # trials peak rms + For p between .001 and 1: + IEEE 1,100 100000 8.3e-15 4.7e-16 + IEEE 1,10000 100000 2.1e-11 1.4e-13 + For p between 10^-6 and 10^-3: + IEEE 1,100 50000 1.3e-12 8.4e-15 + IEEE 1,10000 50000 3.0e-12 4.8e-14 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::gqgenerategaussradaurec( - real_1d_array alpha, - real_1d_array beta, - double mu0, - double a, - ae_int_t n, - ae_int_t& info, - real_1d_array& x, - real_1d_array& w); +
    double alglib::invfdistribution( + ae_int_t a, + ae_int_t b, + double y, + const xparams _params = alglib::xdefault);
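The three F-distribution routines above satisfy simple consistency relations: fdistribution() and fcdistribution() sum to one, and invfdistribution() inverts the complemented distribution. The sketch below checks both relations; it assumes the specialfunctions.h header (not shown in this section), and the degrees of freedom and the point x are chosen arbitrarily.

#include "stdafx.h"
#include <stdio.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    ae_int_t df1 = 5, df2 = 10;
    double x = 2.0;

    //
    // P(x) and 1-P(x) must sum to one
    //
    double p = fdistribution(df1, df2, x);
    double q = fcdistribution(df1, df2, x);
    printf("p+q = %.6f\n", p+q);            // should print 1.000000

    //
    // invfdistribution() inverts the complemented distribution,
    // so it should recover x from q
    //
    double x2 = invfdistribution(df1, df2, q);
    printf("x recovered = %.6f\n", x2);     // should be close to 2.000000
    return 0;
}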
    - + +
    + +fftc1d
    +fftc1dinv
    +fftr1d
    +fftr1dinv
    + + + + + + +
    fft_complex_d1 Complex FFT: simple example
    fft_complex_d2 Complex FFT: advanced example
    fft_real_d1 Real FFT: simple example
    fft_real_d2 Real FFT: advanced example
    +
     
    /************************************************************************* -Computation of nodes and weights for a Gauss quadrature formula +1-dimensional complex FFT. -The algorithm generates the N-point Gauss quadrature formula with weight -function given by coefficients alpha and beta of a recurrence relation -which generates a system of orthogonal polynomials: +Array size N may be arbitrary number (composite or prime). Composite N's +are handled with cache-oblivious variation of a Cooley-Tukey algorithm. +Small prime-factors are transformed using hard coded codelets (similar to +FFTW codelets, but without low-level optimization), large prime-factors +are handled with Bluestein's algorithm. -P-1(x) = 0 -P0(x) = 1 -Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) +Fastests transforms are for smooth N's (prime factors are 2, 3, 5 only), +most fast for powers of 2. When N have prime factors larger than these, +but orders of magnitude smaller than N, computations will be about 4 times +slower than for nearby highly composite N's. When N itself is prime, speed +will be 6 times lower. -and zeroth moment Mu0 +Algorithm has O(N*logN) complexity for any N (composite or prime). -Mu0 = integral(W(x)dx,a,b) +INPUT PARAMETERS + A - array[0..N-1] - complex function to be transformed + N - problem size -INPUT PARAMETERS: - Alpha – array[0..N-1], alpha coefficients - Beta – array[0..N-1], beta coefficients - Zero-indexed element is not used and may be arbitrary. - Beta[I]>0. - Mu0 – zeroth moment of the weight function. - N – number of nodes of the quadrature formula, N>=1 +OUTPUT PARAMETERS + A - DFT of a input array, array[0..N-1] + A_out[j] = SUM(A_in[k]*exp(-2*pi*sqrt(-1)*j*k/N), k = 0..N-1) -OUTPUT PARAMETERS: - Info - error code: - * -3 internal eigenproblem solver hasn't converged - * -2 Beta[i]<=0 - * -1 incorrect N was passed - * 1 OK - X - array[0..N-1] - array of quadrature nodes, - in ascending order. - W - array[0..N-1] - array of quadrature weights. -- ALGLIB -- - Copyright 2005-2009 by Bochkanov Sergey + Copyright 29.05.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::gqgeneraterec( - real_1d_array alpha, - real_1d_array beta, - double mu0, +
    void alglib::fftc1d( + complex_1d_array& a, + const xparams _params = alglib::xdefault); +void alglib::fftc1d( + complex_1d_array& a, ae_int_t n, - ae_int_t& info, - real_1d_array& x, - real_1d_array& w); + const xparams _params = alglib::xdefault);
    - -
    - -hermitecalculate
    -hermitecoefficients
    -hermitesum
    - - -
    - +

Examples:   [1]  [2]

    +
     
    /************************************************************************* -Calculation of the value of the Hermite polynomial. +1-dimensional complex inverse FFT. -Parameters: - n - degree, n>=0 - x - argument +Array size N may be arbitrary number (composite or prime). Algorithm has +O(N*logN) complexity for any N (composite or prime). -Result: - the value of the Hermite polynomial Hn at x -*************************************************************************/ -
    double alglib::hermitecalculate(ae_int_t n, double x); +See FFTC1D() description for more information about algorithm performance. -
    - -
    -
    /************************************************************************* -Representation of Hn as C[0] + C[1]*X + ... + C[N]*X^N +INPUT PARAMETERS + A - array[0..N-1] - complex array to be transformed + N - problem size -Input parameters: - N - polynomial degree, n>=0 +OUTPUT PARAMETERS + A - inverse DFT of a input array, array[0..N-1] + A_out[j] = SUM(A_in[k]/N*exp(+2*pi*sqrt(-1)*j*k/N), k = 0..N-1) -Output parameters: - C - coefficients + + -- ALGLIB -- + Copyright 29.05.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hermitecoefficients(ae_int_t n, real_1d_array& c); +
    void alglib::fftc1dinv( + complex_1d_array& a, + const xparams _params = alglib::xdefault); +void alglib::fftc1dinv( + complex_1d_array& a, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +

Examples:   [1]  [2]

    +
     
    /************************************************************************* -Summation of Hermite polynomials using Clenshaw’s recurrence formula. +1-dimensional real FFT. -This routine calculates - c[0]*H0(x) + c[1]*H1(x) + ... + c[N]*HN(x) +Algorithm has O(N*logN) complexity for any N (composite or prime). -Parameters: - n - degree, n>=0 - x - argument +INPUT PARAMETERS + A - array[0..N-1] - real function to be transformed + N - problem size -Result: - the value of the Hermite polynomial at x -*************************************************************************/ -
    double alglib::hermitesum(real_1d_array c, ae_int_t n, double x); +OUTPUT PARAMETERS + F - DFT of a input array, array[0..N-1] + F[j] = SUM(A[k]*exp(-2*pi*sqrt(-1)*j*k/N), k = 0..N-1) -
    - - - -
    -
    /************************************************************************* -Portable high quality random number generator state. -Initialized with HQRNDRandomize() or HQRNDSeed(). +NOTE: + F[] satisfies symmetry property F[k] = conj(F[N-k]), so just one half +of array is usually needed. But for convinience subroutine returns full +complex array (with frequencies above N/2), so its result may be used by +other FFT-related subroutines. -Fields: - S1, S2 - seed values - V - precomputed value - MagicV - 'magic' value used to determine whether State structure - was correctly initialized. + + -- ALGLIB -- + Copyright 01.06.2009 by Bochkanov Sergey *************************************************************************/ -
    class hqrndstate -{ -}; +
    void alglib::fftr1d( + real_1d_array a, + complex_1d_array& f, + const xparams _params = alglib::xdefault); +void alglib::fftr1d( + real_1d_array a, + ae_int_t n, + complex_1d_array& f, + const xparams _params = alglib::xdefault);
    - +

Examples:   [1]  [2]

    +
     
    /************************************************************************* -This function generates random number from continuous distribution given -by finite sample X. +1-dimensional real inverse FFT. + +Algorithm has O(N*logN) complexity for any N (composite or prime). INPUT PARAMETERS - State - high quality random number generator, must be - initialized with HQRNDRandomize() or HQRNDSeed(). - X - finite sample, array[N] (can be larger, in this case only - leading N elements are used). THIS ARRAY MUST BE SORTED BY - ASCENDING. - N - number of elements to use, N>=1 + F - array[0..floor(N/2)] - frequencies from forward real FFT + N - problem size -RESULT - this function returns random number from continuous distribution which - tries to approximate X as mush as possible. min(X)<=Result<=max(X). +OUTPUT PARAMETERS + A - inverse DFT of a input array, array[0..N-1] - -- ALGLIB -- - Copyright 08.11.2011 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::hqrndcontinuous( - hqrndstate state, - real_1d_array x, - ae_int_t n); +NOTE: + F[] should satisfy symmetry property F[k] = conj(F[N-k]), so just one +half of frequencies array is needed - elements from 0 to floor(N/2). F[0] +is ALWAYS real. If N is even F[floor(N/2)] is real too. If N is odd, then +F[floor(N/2)] has no special properties. -
    - -
    -
    /************************************************************************* -This function generates random number from discrete distribution given by -finite sample X. +Relying on properties noted above, FFTR1DInv subroutine uses only elements +from 0th to floor(N/2)-th. It ignores imaginary part of F[0], and in case +N is even it ignores imaginary part of F[floor(N/2)] too. -INPUT PARAMETERS - State - high quality random number generator, must be - initialized with HQRNDRandomize() or HQRNDSeed(). - X - finite sample - N - number of elements to use, N>=1 +When you call this function using full arguments list - "FFTR1DInv(F,N,A)" +- you can pass either either frequencies array with N elements or reduced +array with roughly N/2 elements - subroutine will successfully transform +both. + +If you call this function using reduced arguments list - "FFTR1DInv(F,A)" +- you must pass FULL array with N elements (although higher N/2 are still +not used) because array size is used to automatically determine FFT length -RESULT - this function returns one of the X[i] for random i=0..N-1 -- ALGLIB -- - Copyright 08.11.2011 by Bochkanov Sergey + Copyright 01.06.2009 by Bochkanov Sergey *************************************************************************/ -
    double alglib::hqrnddiscrete( - hqrndstate state, - real_1d_array x, - ae_int_t n); +
    void alglib::fftr1dinv( + complex_1d_array f, + real_1d_array& a, + const xparams _params = alglib::xdefault); +void alglib::fftr1dinv( + complex_1d_array f, + ae_int_t n, + real_1d_array& a, + const xparams _params = alglib::xdefault);
    - +

Examples:   [1]  [2]

    +
    -
    /************************************************************************* -Random number generator: exponential distribution +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "fasttransforms.h" -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). +using namespace alglib; - -- ALGLIB -- - Copyright 11.08.2007 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::hqrndexponential(hqrndstate state, double lambdav); -
    - +int main(int argc, char **argv) +{ + // + // first we demonstrate forward FFT: + // [1i,1i,1i,1i] is converted to [4i, 0, 0, 0] + // + complex_1d_array z = "[1i,1i,1i,1i]"; + fftc1d(z); + printf("%s\n", z.tostring(3).c_str()); // EXPECTED: [4i,0,0,0] + + // + // now we convert [4i, 0, 0, 0] back to [1i,1i,1i,1i] + // with backward FFT + // + fftc1dinv(z); + printf("%s\n", z.tostring(3).c_str()); // EXPECTED: [1i,1i,1i,1i] + return 0; +} + + +
    -
    /************************************************************************* -Random number generator: normal numbers +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "fasttransforms.h" -This function generates one random number from normal distribution. -Its performance is equal to that of HQRNDNormal2() +using namespace alglib; -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::hqrndnormal(hqrndstate state); +int main(int argc, char **argv) +{ + // + // first we demonstrate forward FFT: + // [0,1,0,1i] is converted to [1+1i, -1-1i, -1-1i, 1+1i] + // + complex_1d_array z = "[0,1,0,1i]"; + fftc1d(z); + printf("%s\n", z.tostring(3).c_str()); // EXPECTED: [1+1i, -1-1i, -1-1i, 1+1i] -
    - + // + // now we convert result back with backward FFT + // + fftc1dinv(z); + printf("%s\n", z.tostring(3).c_str()); // EXPECTED: [0,1,0,1i] + return 0; +} + + +
    -
    /************************************************************************* -Random number generator: normal numbers +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "fasttransforms.h" -This function generates two independent random numbers from normal -distribution. Its performance is equal to that of HQRNDNormal() +using namespace alglib; -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::hqrndnormal2(hqrndstate state, double& x1, double& x2); +int main(int argc, char **argv) +{ + // + // first we demonstrate forward FFT: + // [1,1,1,1] is converted to [4, 0, 0, 0] + // + real_1d_array x = "[1,1,1,1]"; + complex_1d_array f; + real_1d_array x2; + fftr1d(x, f); + printf("%s\n", f.tostring(3).c_str()); // EXPECTED: [4,0,0,0] -
    - -
    -
    /************************************************************************* -HQRNDState initialization with random values which come from standard -RNG. + // + // now we convert [4, 0, 0, 0] back to [1,1,1,1] + // with backward FFT + // + fftr1dinv(f, x2); + printf("%s\n", x2.tostring(3).c_str()); // EXPECTED: [1,1,1,1] + return 0; +} - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::hqrndrandomize(hqrndstate& state); -
    - +
    -
    /************************************************************************* -HQRNDState initialization with seed values +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "fasttransforms.h" - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::hqrndseed(ae_int_t s1, ae_int_t s2, hqrndstate& state); +using namespace alglib; -
    - -
    -
    /************************************************************************* -This function generates random integer number in [0, N) -1. State structure must be initialized with HQRNDRandomize() or HQRNDSeed() -2. N can be any positive number except for very large numbers: - * close to 2^31 on 32-bit systems - * close to 2^62 on 64-bit systems - An exception will be generated if N is too large. +int main(int argc, char **argv) +{ + // + // first we demonstrate forward FFT: + // [1,2,3,4] is converted to [10, -2+2i, -2, -2-2i] + // + // note that output array is self-adjoint: + // * f[0] = conj(f[0]) + // * f[1] = conj(f[3]) + // * f[2] = conj(f[2]) + // + real_1d_array x = "[1,2,3,4]"; + complex_1d_array f; + real_1d_array x2; + fftr1d(x, f); + printf("%s\n", f.tostring(3).c_str()); // EXPECTED: [10, -2+2i, -2, -2-2i] - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -
    ae_int_t alglib::hqrnduniformi(hqrndstate state, ae_int_t n); + // + // now we convert [10, -2+2i, -2, -2-2i] back to [1,2,3,4] + // + fftr1dinv(f, x2); + printf("%s\n", x2.tostring(3).c_str()); // EXPECTED: [1,2,3,4] -
    - + // + // remember that F is self-adjoint? It means that we can pass just half + // (slightly larger than half) of F to inverse real FFT and still get our result. + // + // I.e. instead [10, -2+2i, -2, -2-2i] we pass just [10, -2+2i, -2] and everything works! + // + // NOTE: in this case we should explicitly pass array length (which is 4) to ALGLIB; + // if not, it will automatically use array length to determine FFT size and + // will erroneously make half-length FFT. + // + f = "[10, -2+2i, -2]"; + fftr1dinv(f, 4, x2); + printf("%s\n", x2.tostring(3).c_str()); // EXPECTED: [1,2,3,4] + return 0; +} + + + +
    + +fhtr1d
    +fhtr1dinv
    + + +
    +
     
    /************************************************************************* -This function generates random real number in (0,1), -not including interval boundaries +1-dimensional Fast Hartley Transform. + +Algorithm has O(N*logN) complexity for any N (composite or prime). + +INPUT PARAMETERS + A - array[0..N-1] - real function to be transformed + N - problem size + +OUTPUT PARAMETERS + A - FHT of a input array, array[0..N-1], + A_out[k] = sum(A_in[j]*(cos(2*pi*j*k/N)+sin(2*pi*j*k/N)), j=0..N-1) -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey + Copyright 04.06.2009 by Bochkanov Sergey *************************************************************************/ -
    double alglib::hqrnduniformr(hqrndstate state); +
    void alglib::fhtr1d( + real_1d_array& a, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Random number generator: random X and Y such that X^2+Y^2=1 +1-dimensional inverse FHT. + +Algorithm has O(N*logN) complexity for any N (composite or prime). + +INPUT PARAMETERS + A - array[0..N-1] - complex array to be transformed + N - problem size + +OUTPUT PARAMETERS + A - inverse FHT of a input array, array[0..N-1] -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey + Copyright 29.05.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hqrndunit2(hqrndstate state, double& x, double& y); +
    void alglib::fhtr1dinv( + real_1d_array& a, + ae_int_t n, + const xparams _params = alglib::xdefault);
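Unlike the FFT routines, the FHT pair above is not accompanied by an example here, so the following round-trip sketch is added in the same style as the FFT examples. It uses the fasttransforms.h header already used by those examples; the input vector is illustrative.

#include "stdafx.h"
#include <stdio.h>
#include "fasttransforms.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Forward Hartley transform of [1,2,3,4], followed by the inverse
    // transform, should reproduce the original array (up to rounding).
    //
    real_1d_array a = "[1,2,3,4]";
    fhtr1d(a, 4);
    fhtr1dinv(a, 4);
    printf("%s\n", a.tostring(3).c_str()); // should be close to [1,2,3,4]
    return 0;
}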
    - +
    -incompletebeta
    -invincompletebeta
    +filterema
    +filterlrma
    +filtersma
    + + +
    filters_d_ema EMA(alpha) filter
    filters_d_lrma LRMA(k) filter
    filters_d_sma SMA(k) filter
    - +
     
    /************************************************************************* -Incomplete beta integral - -Returns incomplete beta integral of the arguments, evaluated -from zero to x. The function is defined as +Filters: exponential moving averages. - x - - - - | (a+b) | | a-1 b-1 - ----------- | t (1-t) dt. - - - | | - | (a) | (b) - - 0 +This filter replaces array by results of EMA(alpha) filter. EMA(alpha) is +defined as filter which replaces X[] by S[]: + S[0] = X[0] + S[t] = alpha*X[t] + (1-alpha)*S[t-1] -The domain of definition is 0 <= x <= 1. In this -implementation a and b are restricted to positive values. -The integral from x to 1 may be obtained by the symmetry -relation +INPUT PARAMETERS: + X - array[N], array to process. It can be larger than N, + in this case only first N points are processed. + N - points count, N>=0 + alpha - 0<alpha<=1, smoothing parameter. - 1 - incbet( a, b, x ) = incbet( b, a, 1-x ). +OUTPUT PARAMETERS: + X - array, whose first N elements were processed + with EMA(alpha) -The integral is evaluated by a continued fraction expansion -or, when b*x is small, by a power series. +NOTE 1: this function uses efficient in-place algorithm which does not + allocate temporary arrays. -ACCURACY: +NOTE 2: this algorithm uses BOTH previous points and current one, i.e. + new value of X[i] depends on BOTH previous point and X[i] itself. -Tested at uniformly distributed random points (a,b,x) with a and b -in "domain" and x between 0 and 1. - Relative error -arithmetic domain # trials peak rms - IEEE 0,5 10000 6.9e-15 4.5e-16 - IEEE 0,85 250000 2.2e-13 1.7e-14 - IEEE 0,1000 30000 5.3e-12 6.3e-13 - IEEE 0,10000 250000 9.3e-11 7.1e-12 - IEEE 0,100000 10000 8.7e-10 4.8e-11 -Outputs smaller than the IEEE gradual underflow threshold -were excluded from these statistics. +NOTE 3: technical analytis users quite often work with EMA coefficient + expressed in DAYS instead of fractions. If you want to calculate + EMA(N), where N is a number of days, you can use alpha=2/(N+1). -Cephes Math Library, Release 2.8: June, 2000 -Copyright 1984, 1995, 2000 by Stephen L. Moshier + -- ALGLIB -- + Copyright 25.10.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::incompletebeta(double a, double b, double x); +
    void alglib::filterema( + real_1d_array& x, + double alpha, + const xparams _params = alglib::xdefault); +void alglib::filterema( + real_1d_array& x, + ae_int_t n, + double alpha, + const xparams _params = alglib::xdefault);
    - +

Examples:   [1]

    +
     
    /************************************************************************* -Inverse of imcomplete beta integral +Filters: linear regression moving averages. -Given y, the function finds x such that +This filter replaces array by results of LRMA(K) filter. - incbet( a, b, x ) = y . +LRMA(K) is defined as filter which, for each data point, builds linear +regression model using K prevous points (point itself is included in +these K points) and calculates value of this linear model at the point in +question. -The routine performs interval halving or Newton iterations to find the -root of incbet(a,b,x) - y = 0. +INPUT PARAMETERS: + X - array[N], array to process. It can be larger than N, + in this case only first N points are processed. + N - points count, N>=0 + K - K>=1 (K can be larger than N , such cases will be + correctly handled). Window width. K=1 corresponds to + identity transformation (nothing changes). +OUTPUT PARAMETERS: + X - array, whose first N elements were processed with SMA(K) -ACCURACY: +NOTE 1: this function uses efficient in-place algorithm which does not + allocate temporary arrays. - Relative error: - x a,b -arithmetic domain domain # trials peak rms - IEEE 0,1 .5,10000 50000 5.8e-12 1.3e-13 - IEEE 0,1 .25,100 100000 1.8e-13 3.9e-15 - IEEE 0,1 0,5 50000 1.1e-12 5.5e-15 -With a and b constrained to half-integer or integer values: - IEEE 0,1 .5,10000 50000 5.8e-12 1.1e-13 - IEEE 0,1 .5,100 100000 1.7e-14 7.9e-16 -With a = .5, b constrained to half-integer or integer values: - IEEE 0,1 .5,10000 10000 8.3e-11 1.0e-11 +NOTE 2: this algorithm makes only one pass through array and uses running + sum to speed-up calculation of the averages. Additional measures + are taken to ensure that running sum on a long sequence of zero + elements will be correctly reset to zero even in the presence of + round-off error. -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1996, 2000 by Stephen L. Moshier -*************************************************************************/ -
    double alglib::invincompletebeta(double a, double b, double y); +NOTE 3: this is unsymmetric version of the algorithm, which does NOT + averages points after the current one. Only X[i], X[i-1], ... are + used when calculating new value of X[i]. We should also note that + this algorithm uses BOTH previous points and current one, i.e. + new value of X[i] depends on BOTH previous point and X[i] itself. -
    - - - -
    -
    /************************************************************************* -IDW interpolant. + -- ALGLIB -- + Copyright 25.10.2011 by Bochkanov Sergey *************************************************************************/ -
    class idwinterpolant -{ -}; +
    void alglib::filterlrma( + real_1d_array& x, + ae_int_t k, + const xparams _params = alglib::xdefault); +void alglib::filterlrma( + real_1d_array& x, + ae_int_t n, + ae_int_t k, + const xparams _params = alglib::xdefault);
    - +

Examples:   [1]

    +
     
    /************************************************************************* -IDW interpolant using modified Shepard method for uniform point -distributions. +Filters: simple moving averages (unsymmetric). + +This filter replaces array by results of SMA(K) filter. SMA(K) is defined +as filter which averages at most K previous points (previous - not points +AROUND central point) - or less, in case of the first K-1 points. INPUT PARAMETERS: - XY - X and Y values, array[0..N-1,0..NX]. - First NX columns contain X-values, last column contain - Y-values. - N - number of nodes, N>0. - NX - space dimension, NX>=1. - D - nodal function type, either: - * 0 constant model. Just for demonstration only, worst - model ever. - * 1 linear model, least squares fitting. Simpe model for - datasets too small for quadratic models - * 2 quadratic model, least squares fitting. Best model - available (if your dataset is large enough). - * -1 "fast" linear model, use with caution!!! It is - significantly faster than linear/quadratic and better - than constant model. But it is less robust (especially - in the presence of noise). - NQ - number of points used to calculate nodal functions (ignored - for constant models). NQ should be LARGER than: - * max(1.5*(1+NX),2^NX+1) for linear model, - * max(3/4*(NX+2)*(NX+1),2^NX+1) for quadratic model. - Values less than this threshold will be silently increased. - NW - number of points used to calculate weights and to interpolate. - Required: >=2^NX+1, values less than this threshold will be - silently increased. - Recommended value: about 2*NQ + X - array[N], array to process. It can be larger than N, + in this case only first N points are processed. + N - points count, N>=0 + K - K>=1 (K can be larger than N , such cases will be + correctly handled). Window width. K=1 corresponds to + identity transformation (nothing changes). OUTPUT PARAMETERS: - Z - IDW interpolant. + X - array, whose first N elements were processed with SMA(K) -NOTES: - * best results are obtained with quadratic models, worst - with constant - models - * when N is large, NQ and NW must be significantly smaller than N both - to obtain optimal performance and to obtain optimal accuracy. In 2 or - 3-dimensional tasks NQ=15 and NW=25 are good values to start with. - * NQ and NW may be greater than N. In such cases they will be - automatically decreased. - * this subroutine is always succeeds (as long as correct parameters are - passed). - * see 'Multivariate Interpolation of Large Sets of Scattered Data' by - Robert J. Renka for more information on this algorithm. - * this subroutine assumes that point distribution is uniform at the small - scales. If it isn't - for example, points are concentrated along - "lines", but "lines" distribution is uniform at the larger scale - then - you should use IDWBuildModifiedShepardR() +NOTE 1: this function uses efficient in-place algorithm which does not + allocate temporary arrays. + +NOTE 2: this algorithm makes only one pass through array and uses running + sum to speed-up calculation of the averages. Additional measures + are taken to ensure that running sum on a long sequence of zero + elements will be correctly reset to zero even in the presence of + round-off error. +NOTE 3: this is unsymmetric version of the algorithm, which does NOT + averages points after the current one. Only X[i], X[i-1], ... are + used when calculating new value of X[i]. We should also note that + this algorithm uses BOTH previous points and current one, i.e. 
+ new value of X[i] depends on BOTH previous point and X[i] itself. - -- ALGLIB PROJECT -- - Copyright 02.03.2010 by Bochkanov Sergey + -- ALGLIB -- + Copyright 25.10.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::idwbuildmodifiedshepard( - real_2d_array xy, +
    void alglib::filtersma( + real_1d_array& x, + ae_int_t k, + const xparams _params = alglib::xdefault); +void alglib::filtersma( + real_1d_array& x, ae_int_t n, - ae_int_t nx, - ae_int_t d, - ae_int_t nq, - ae_int_t nw, - idwinterpolant& z); + ae_int_t k, + const xparams _params = alglib::xdefault);
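In the same style as the EMA and LRMA examples shown further below, here is a small supplementary sketch of filtersma() (not an upstream example), using the dataanalysis.h header from those examples. The expected output follows directly from the SMA(K) definition above: with K=2 each point is averaged with at most one previous point.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // SMA(2) filtering of a short time series:
    // [5,6,7,8] should become [5, 5.5, 6.5, 7.5]
    //
    real_1d_array x = "[5,6,7,8]";
    filtersma(x, 2);
    printf("%s\n", x.tostring(4).c_str()); // expected: [5,5.5,6.5,7.5]
    return 0;
}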
    - +

Examples:   [1]

    +
    -
    /************************************************************************* -IDW interpolant using modified Shepard method for non-uniform datasets. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "dataanalysis.h" -This type of model uses constant nodal functions and interpolates using -all nodes which are closer than user-specified radius R. It may be used -when points distribution is non-uniform at the small scale, but it is at -the distances as large as R. +using namespace alglib; -INPUT PARAMETERS: - XY - X and Y values, array[0..N-1,0..NX]. - First NX columns contain X-values, last column contain - Y-values. - N - number of nodes, N>0. - NX - space dimension, NX>=1. - R - radius, R>0 -OUTPUT PARAMETERS: - Z - IDW interpolant. +int main(int argc, char **argv) +{ + // + // Here we demonstrate EMA(0.5) filtering for time series. + // + real_1d_array x = "[5,6,7,8]"; -NOTES: -* if there is less than IDWKMin points within R-ball, algorithm selects - IDWKMin closest ones, so that continuity properties of interpolant are - preserved even far from points. + // + // Apply filter. + // We should get [5, 5.5, 6.25, 7.125] as result + // + filterema(x, 0.5); + printf("%s\n", x.tostring(4).c_str()); // EXPECTED: [5,5.5,6.25,7.125] + return 0; +} - -- ALGLIB PROJECT -- - Copyright 11.04.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::idwbuildmodifiedshepardr( - real_2d_array xy, - ae_int_t n, - ae_int_t nx, - double r, - idwinterpolant& z); -
    - +
    -
    /************************************************************************* -IDW model for noisy data. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "dataanalysis.h" -This subroutine may be used to handle noisy data, i.e. data with noise in -OUTPUT values. It differs from IDWBuildModifiedShepard() in the following -aspects: -* nodal functions are not constrained to pass through nodes: Qi(xi)<>yi, - i.e. we have fitting instead of interpolation. -* weights which are used during least squares fitting stage are all equal - to 1.0 (independently of distance) -* "fast"-linear or constant nodal functions are not supported (either not - robust enough or too rigid) - -This problem require far more complex tuning than interpolation problems. -Below you can find some recommendations regarding this problem: -* focus on tuning NQ; it controls noise reduction. As for NW, you can just - make it equal to 2*NQ. -* you can use cross-validation to determine optimal NQ. -* optimal NQ is a result of complex tradeoff between noise level (more - noise = larger NQ required) and underlying function complexity (given - fixed N, larger NQ means smoothing of compex features in the data). For - example, NQ=N will reduce noise to the minimum level possible, but you - will end up with just constant/linear/quadratic (depending on D) least - squares model for the whole dataset. - -INPUT PARAMETERS: - XY - X and Y values, array[0..N-1,0..NX]. - First NX columns contain X-values, last column contain - Y-values. - N - number of nodes, N>0. - NX - space dimension, NX>=1. - D - nodal function degree, either: - * 1 linear model, least squares fitting. Simpe model for - datasets too small for quadratic models (or for very - noisy problems). - * 2 quadratic model, least squares fitting. Best model - available (if your dataset is large enough). - NQ - number of points used to calculate nodal functions. NQ should - be significantly larger than 1.5 times the number of - coefficients in a nodal function to overcome effects of noise: - * larger than 1.5*(1+NX) for linear model, - * larger than 3/4*(NX+2)*(NX+1) for quadratic model. - Values less than this threshold will be silently increased. - NW - number of points used to calculate weights and to interpolate. - Required: >=2^NX+1, values less than this threshold will be - silently increased. - Recommended value: about 2*NQ or larger +using namespace alglib; -OUTPUT PARAMETERS: - Z - IDW interpolant. -NOTES: - * best results are obtained with quadratic models, linear models are not - recommended to use unless you are pretty sure that it is what you want - * this subroutine is always succeeds (as long as correct parameters are - passed). - * see 'Multivariate Interpolation of Large Sets of Scattered Data' by - Robert J. Renka for more information on this algorithm. +int main(int argc, char **argv) +{ + // + // Here we demonstrate LRMA(3) filtering for time series. + // + real_1d_array x = "[7,8,8,9,12,12]"; + + // + // Apply filter. + // We should get [7.0000, 8.0000, 8.1667, 8.8333, 11.6667, 12.5000] as result + // + filterlrma(x, 3); + printf("%s\n", x.tostring(4).c_str()); // EXPECTED: [7.0000,8.0000,8.1667,8.8333,11.6667,12.5000] + return 0; +} - -- ALGLIB PROJECT -- - Copyright 02.03.2010 by Bochkanov Sergey +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "dataanalysis.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // Here we demonstrate SMA(k) filtering for time series.
    +    //
    +    real_1d_array x = "[5,6,7,8]";
    +
    +    //
    +    // Apply filter.
    +    // We should get [5, 5.5, 6.5, 7.5] as result
    +    //
    +    filtersma(x, 2);
    +    printf("%s\n", x.tostring(4).c_str()); // EXPECTED: [5,5.5,6.5,7.5]
    +    return 0;
    +}
    +
    +
    +
    + + +
    +
    /************************************************************************* +Fits least squares (LS) circle (or NX-dimensional sphere) to data (a set +of points in NX-dimensional space). + +Least squares circle minimizes sum of squared deviations between distances +from points to the center and some "candidate" radius, which is also +fitted to the data. + +INPUT PARAMETERS: + XY - array[NPoints,NX] (or larger), contains dataset. + One row = one point in NX-dimensional space. + NPoints - dataset size, NPoints>0 + NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) + +OUTPUT PARAMETERS: + CX - central point for a sphere + R - radius + + -- ALGLIB -- + Copyright 07.05.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::idwbuildnoisy( +
    void alglib::fitspherels( real_2d_array xy, - ae_int_t n, + ae_int_t npoints, ae_int_t nx, - ae_int_t d, - ae_int_t nq, - ae_int_t nw, - idwinterpolant& z); + real_1d_array& cx, + double& r, + const xparams _params = alglib::xdefault);
    - +
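As a quick check of fitspherels(), the sketch below fits a least-squares circle to four points on the unit circle; the recovered center and radius should be (0,0) and 1. This is not an official manual example, and it assumes the fitsphere* routines are declared in interpolation.h, as in current ALGLIB releases.

#include "stdafx.h"
#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Four points on the unit circle centered at the origin (NX=2, NPoints=4).
    //
    real_2d_array xy = "[[1,0],[0,1],[-1,0],[0,-1]]";
    real_1d_array cx;
    double r;

    //
    // Least squares fit: center should be close to (0,0), radius close to 1.
    //
    fitspherels(xy, 4, 2, cx, r);
    printf("%s\n", cx.tostring(3).c_str()); // EXPECTED: approximately [0.000,0.000]
    printf("%.3f\n", r);                    // EXPECTED: approximately 1.000
    return 0;
}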
     
    /************************************************************************* -IDW interpolation +Fits minimum circumscribed (MC) circle (or NX-dimensional sphere) to data +(a set of points in NX-dimensional space). INPUT PARAMETERS: - Z - IDW interpolant built with one of model building - subroutines. - X - array[0..NX-1], interpolation point + XY - array[NPoints,NX] (or larger), contains dataset. + One row = one point in NX-dimensional space. + NPoints - dataset size, NPoints>0 + NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) + +OUTPUT PARAMETERS: + CX - central point for a sphere + RHi - radius + +NOTE: this function is an easy-to-use wrapper around more powerful "expert" + function fitspherex(). + + This wrapper is optimized for ease of use and stability - at the + cost of somewhat lower performance (we have to use very tight + stopping criteria for inner optimizer because we want to make sure + that it will converge on any dataset). + + If you are ready to experiment with settings of "expert" function, + you can achieve ~2-4x speedup over standard "bulletproof" settings. -Result: - IDW interpolant Z(X) -- ALGLIB -- - Copyright 02.03.2010 by Bochkanov Sergey + Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ -
    double alglib::idwcalc(idwinterpolant z, real_1d_array x); +
    void alglib::fitspheremc( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nx, + real_1d_array& cx, + double& rhi, + const xparams _params = alglib::xdefault);
    - - - +
     
    /************************************************************************* -Incomplete gamma integral +Fits maximum inscribed circle (or NX-dimensional sphere) to data (a set of +points in NX-dimensional space). -The function is defined by +INPUT PARAMETERS: + XY - array[NPoints,NX] (or larger), contains dataset. + One row = one point in NX-dimensional space. + NPoints - dataset size, NPoints>0 + NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) - x - - - 1 | | -t a-1 - igam(a,x) = ----- | e t dt. - - | | - | (a) - - 0 +OUTPUT PARAMETERS: + CX - central point for a sphere + RLo - radius +NOTE: this function is an easy-to-use wrapper around more powerful "expert" + function fitspherex(). -In this implementation both arguments must be positive. -The integral is evaluated by either a power series or -continued fraction expansion, depending on the relative -values of a and x. + This wrapper is optimized for ease of use and stability - at the + cost of somewhat lower performance (we have to use very tight + stopping criteria for inner optimizer because we want to make sure + that it will converge on any dataset). -ACCURACY: + If you are ready to experiment with settings of "expert" function, + you can achieve ~2-4x speedup over standard "bulletproof" settings. - Relative error: -arithmetic domain # trials peak rms - IEEE 0,30 200000 3.6e-14 2.9e-15 - IEEE 0,100 300000 9.9e-14 1.5e-14 -Cephes Math Library Release 2.8: June, 2000 -Copyright 1985, 1987, 2000 by Stephen L. Moshier + -- ALGLIB -- + Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ -
    double alglib::incompletegamma(double a, double x); +
    void alglib::fitspheremi( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nx, + real_1d_array& cx, + double& rlo, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Complemented incomplete gamma integral +Fits minimum zone circle (or NX-dimensional sphere) to data (a set of +points in NX-dimensional space). -The function is defined by +INPUT PARAMETERS: + XY - array[NPoints,NX] (or larger), contains dataset. + One row = one point in NX-dimensional space. + NPoints - dataset size, NPoints>0 + NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) +OUTPUT PARAMETERS: + CX - central point for a sphere + RLo - radius of inscribed circle + RHo - radius of circumscribed circle - igamc(a,x) = 1 - igam(a,x) +NOTE: this function is an easy-to-use wrapper around more powerful "expert" + function fitspherex(). - inf. - - - 1 | | -t a-1 - = ----- | e t dt. - - | | - | (a) - - x + This wrapper is optimized for ease of use and stability - at the + cost of somewhat lower performance (we have to use very tight + stopping criteria for inner optimizer because we want to make sure + that it will converge on any dataset). + If you are ready to experiment with settings of "expert" function, + you can achieve ~2-4x speedup over standard "bulletproof" settings. -In this implementation both arguments must be positive. -The integral is evaluated by either a power series or -continued fraction expansion, depending on the relative -values of a and x. -ACCURACY: + -- ALGLIB -- + Copyright 14.04.2017 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::fitspheremz( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nx, + real_1d_array& cx, + double& rlo, + double& rhi, + const xparams _params = alglib::xdefault); + +
    + +
    +
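The three wrappers above (MC, MI, MZ) share one calling pattern and can be compared on a single dataset; for any point set the inscribed radius RLo cannot exceed the circumscribed radius RHi, and fitspheremz() returns both at once. The sketch below is not an official example and assumes interpolation.h; the printed radii depend on the small asymmetry of the test points.

#include "stdafx.h"
#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Slightly non-circular 2D dataset (NX=2, NPoints=4).
    //
    real_2d_array xy = "[[1.1,0],[0,0.9],[-1.05,0],[0,-0.95]]";
    real_1d_array cx;
    double rlo, rhi;

    //
    // Minimum circumscribed, maximum inscribed and minimum zone fits.
    //
    fitspheremc(xy, 4, 2, cx, rhi);
    printf("MC: center=%s rhi=%.3f\n", cx.tostring(3).c_str(), rhi);

    fitspheremi(xy, 4, 2, cx, rlo);
    printf("MI: center=%s rlo=%.3f\n", cx.tostring(3).c_str(), rlo);

    fitspheremz(xy, 4, 2, cx, rlo, rhi);
    printf("MZ: center=%s rlo=%.3f rhi=%.3f\n", cx.tostring(3).c_str(), rlo, rhi);
    return 0;
}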
    /************************************************************************* +Fitting minimum circumscribed, maximum inscribed or minimum zone circles +(or NX-dimensional spheres) to data (a set of points in NX-dimensional +space). + +This is expert function which allows to tweak many parameters of +underlying nonlinear solver: +* stopping criteria for inner iterations +* number of outer iterations +* penalty coefficient used to handle nonlinear constraints (we convert + unconstrained nonsmooth optimization problem ivolving max() and/or min() + operations to quadratically constrained smooth one). + +You may tweak all these parameters or only some of them, leaving other +ones at their default state - just specify zero value, and solver will +fill it with appropriate default one. + +These comments also include some discussion of approach used to handle +such unusual fitting problem, its stability, drawbacks of alternative +methods, and convergence properties. + +INPUT PARAMETERS: + XY - array[NPoints,NX] (or larger), contains dataset. + One row = one point in NX-dimensional space. + NPoints - dataset size, NPoints>0 + NX - space dimensionality, NX>0 (1, 2, 3, 4, 5 and so on) + ProblemType-used to encode problem type: + * 0 for least squares circle + * 1 for minimum circumscribed circle/sphere fitting (MC) + * 2 for maximum inscribed circle/sphere fitting (MI) + * 3 for minimum zone circle fitting (difference between + Rhi and Rlo is minimized), denoted as MZ + EpsX - stopping condition for NLC optimizer: + * must be non-negative + * use 0 to choose default value (1.0E-12 is used by default) + * you may specify larger values, up to 1.0E-6, if you want + to speed-up solver; NLC solver performs several + preconditioned outer iterations, so final result + typically has precision much better than EpsX. + AULIts - number of outer iterations performed by NLC optimizer: + * must be non-negative + * use 0 to choose default value (20 is used by default) + * you may specify values smaller than 20 if you want to + speed up solver; 10 often results in good combination of + precision and speed; sometimes you may get good results + with just 6 outer iterations. + Ignored for ProblemType=0. + Penalty - penalty coefficient for NLC optimizer: + * must be non-negative + * use 0 to choose default value (1.0E6 in current version) + * it should be really large, 1.0E6...1.0E7 is a good value + to start from; + * generally, default value is good enough + Ignored for ProblemType=0. + +OUTPUT PARAMETERS: + CX - central point for a sphere + RLo - radius: + * for ProblemType=2,3, radius of the inscribed sphere + * for ProblemType=0 - radius of the least squares sphere + * for ProblemType=1 - zero + RHo - radius: + * for ProblemType=1,3, radius of the circumscribed sphere + * for ProblemType=0 - radius of the least squares sphere + * for ProblemType=2 - zero + +NOTE: ON THE UNIQUENESS OF SOLUTIONS + +ALGLIB provides solution to several related circle fitting problems: MC +(minimum circumscribed), MI (maximum inscribed) and MZ (minimum zone) +fitting, LS (least squares) fitting. + +It is important to note that among these problems only MC and LS are +convex and have unique solution independently from starting point. + +As for MI, it may (or may not, depending on dataset properties) have +multiple solutions, and it always has one degenerate solution C=infinity +which corresponds to infinitely large radius. 
Thus, there are no guarantees +that solution to MI returned by this solver will be the best one (and no +one can provide you with such guarantee because problem is NP-hard). The +only guarantee you have is that this solution is locally optimal, i.e. it +can not be improved by infinitesimally small tweaks in the parameters. + +It is also possible to "run away" to infinity when started from bad +initial point located outside of point cloud (or when point cloud does not +span entire circumference/surface of the sphere). + +Finally, MZ (minimum zone circle) stands somewhere between MC and MI in +stability. It is somewhat regularized by "circumscribed" term of the merit +function; however, solutions to MZ may be non-unique, and in some unlucky +cases it is also possible to "run away to infinity". + + +NOTE: ON THE NONLINEARLY CONSTRAINED PROGRAMMING APPROACH + +The problem formulation for MC (minimum circumscribed circle; for the +sake of simplicity we omit MZ and MI here) is: + + [ [ ]2 ] + min [ max [ XY[i]-C ] ] + C [ i [ ] ] + +i.e. it is unconstrained nonsmooth optimization problem of finding "best" +central point, with radius R being unambiguously determined from C. In +order to move away from non-smoothness we use following reformulation: + + [ ] [ ]2 + min [ R ] subject to R>=0, [ XY[i]-C ] <= R^2 + C,R [ ] [ ] + +i.e. it becomes smooth quadratically constrained optimization problem with +linear target function. Such problem statement is 100% equivalent to the +original nonsmooth one, but much easier to approach. We solve it with +MinNLC solver provided by ALGLIB. + + +NOTE: ON INSTABILITY OF SEQUENTIAL LINEARIZATION APPROACH + +ALGLIB has nonlinearly constrained solver which proved to be stable on +such problems. However, some authors proposed to linearize constraints in +the vicinity of current approximation (Ci,Ri) and to get next approximate +solution (Ci+1,Ri+1) as solution to linear programming problem. Obviously, +LP problems are easier than nonlinearly constrained ones. + +Indeed, such approach to MC/MI/MZ resulted in ~10-20x increase in +performance (when compared with NLC solver). However, it turned out that +in some cases linearized model fails to predict correct direction for next +step and tells us that we converged to solution even when we are still 2-4 +digits of precision away from it. + +It is important that it is not failure of LP solver - it is failure of the +linear model; even when solved exactly, it fails to handle subtle +nonlinearities which arise near the solution. We validated it by comparing +results returned by ALGLIB linear solver with that of MATLAB. + +In our experiments with linearization: +* MC failed most often, at both realistic and synthetic datasets +* MI sometimes failed, but sometimes succeeded +* MZ often succeeded; our guess is that presence of two independent sets + of constraints (one set for Rlo and another one for Rhi) and two terms + in the target function (Rlo and Rhi) regularizes task, so when linear + model fails to handle nonlinearities from Rlo, it uses Rhi as a hint + (and vice versa). -Tested at random a, x. - a x Relative error: -arithmetic domain domain # trials peak rms - IEEE 0.5,100 0,100 200000 1.9e-14 1.7e-15 - IEEE 0.01,0.5 0,100 200000 1.4e-13 1.6e-15 +Because linearization approach failed to achieve stable results, we do not +include it in ALGLIB. -Cephes Math Library Release 2.8: June, 2000 -Copyright 1985, 1987, 2000 by Stephen L. 
Moshier + + -- ALGLIB -- + Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ -
    double alglib::incompletegammac(double a, double x); +
    void alglib::fitspherex( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nx, + ae_int_t problemtype, + double epsx, + ae_int_t aulits, + double penalty, + real_1d_array& cx, + double& rlo, + double& rhi, + const xparams _params = alglib::xdefault);
    - + +
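The expert function exposes the same fits with explicit solver settings; passing zero for EpsX, AULIts and Penalty selects the defaults described above, so the call below with ProblemType=1 (minimum circumscribed) should agree with fitspheremc() up to solver tolerance. Again a sketch assuming interpolation.h, not an official example.

#include "stdafx.h"
#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array xy = "[[1.1,0],[0,0.9],[-1.05,0],[0,-0.95]]";
    real_1d_array cx;
    double rlo, rhi;

    //
    // ProblemType=1 (MC fit); EpsX=0, AULIts=0, Penalty=0 select the defaults
    // (1.0E-12, 20 outer iterations and 1.0E6 respectively).
    //
    fitspherex(xy, 4, 2, 1, 0.0, 0, 0.0, cx, rlo, rhi);
    printf("center=%s\n", cx.tostring(3).c_str());
    printf("rlo=%.3f rhi=%.3f\n", rlo, rhi); // for ProblemType=1 RLo is reported as zero
    return 0;
}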
    + +fresnelintegral
    + + +
    +
     
    /************************************************************************* -Inverse of complemented imcomplete gamma integral - -Given p, the function finds x such that +Fresnel integral - igamc( a, x ) = p. +Evaluates the Fresnel integrals -Starting with the approximate value + x + - + | | +C(x) = | cos(pi/2 t**2) dt, + | | + - + 0 - 3 - x = a t + x + - + | | +S(x) = | sin(pi/2 t**2) dt. + | | + - + 0 - where - t = 1 - d - ndtri(p) sqrt(d) +The integrals are evaluated by a power series for x < 1. +For x >= 1 auxiliary functions f(x) and g(x) are employed +such that -and +C(x) = 0.5 + f(x) sin( pi/2 x**2 ) - g(x) cos( pi/2 x**2 ) +S(x) = 0.5 - f(x) cos( pi/2 x**2 ) - g(x) sin( pi/2 x**2 ) - d = 1/9a, -the routine performs up to 10 Newton iterations to find the -root of igamc(a,x) - p = 0. ACCURACY: -Tested at random a, p in the intervals indicated. + Relative error. - a p Relative error: -arithmetic domain domain # trials peak rms - IEEE 0.5,100 0,0.5 100000 1.0e-14 1.7e-15 - IEEE 0.01,0.5 0,0.5 100000 9.0e-14 3.4e-15 - IEEE 0.5,10000 0,0.5 20000 2.3e-13 3.8e-14 +Arithmetic function domain # trials peak rms + IEEE S(x) 0, 10 10000 2.0e-15 3.2e-16 + IEEE C(x) 0, 10 10000 1.8e-15 3.3e-16 Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier +Copyright 1984, 1987, 1989, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::invincompletegammac(double a, double y0); +
    void alglib::fresnelintegral( + double x, + double& c, + double& s, + const xparams _params = alglib::xdefault);
    - + - +
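A quick sanity check for fresnelintegral() is the well-known pair C(1) = 0.7798..., S(1) = 0.4382..., together with the fact that both integrals tend to 0.5 for large x. The sketch below assumes the function is declared in specialfunctions.h; it is not an official manual example.

#include "stdafx.h"
#include <stdio.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    double c, s;

    //
    // Fresnel integrals at x=1; both C(x) and S(x) approach 0.5 as x grows.
    //
    fresnelintegral(1.0, c, s);
    printf("%.4f %.4f\n", c, s); // EXPECTED: approximately 0.7799 0.4383
    return 0;
}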
     
    /************************************************************************* -Inverse matrix update by the Sherman-Morrison formula - -The algorithm updates matrix A^-1 when adding a vector to a column -of matrix A. +Gamma function Input parameters: - InvA - inverse of matrix A. - Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - UpdColumn - the column of A whose vector U was added. - 0 <= UpdColumn <= N-1 - U - the vector to be added to a column. - Array whose index ranges within [0..N-1]. + X - argument -Output parameters: - InvA - inverse of modified matrix A. +Domain: + 0 < X < 171.6 + -170 < X < 0, X is not an integer. - -- ALGLIB -- - Copyright 2005 by Bochkanov Sergey +Relative error: + arithmetic domain # trials peak rms + IEEE -170,-33 20000 2.3e-15 3.3e-16 + IEEE -33, 33 20000 9.4e-16 2.2e-16 + IEEE 33, 171.6 20000 2.3e-15 3.2e-16 + +Cephes Math Library Release 2.8: June, 2000 +Original copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier +Translated to AlgoPascal by Bochkanov Sergey (2005, 2006, 2007). *************************************************************************/ -
    void alglib::rmatrixinvupdatecolumn( - real_2d_array& inva, - ae_int_t n, - ae_int_t updcolumn, - real_1d_array u); +
    double alglib::gammafunction( + double x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Inverse matrix update by the Sherman-Morrison formula - -The algorithm updates matrix A^-1 when adding a vector to a row -of matrix A. +Natural logarithm of gamma function Input parameters: - InvA - inverse of matrix A. - Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - UpdRow - the row of A whose vector V was added. - 0 <= Row <= N-1 - V - the vector to be added to a row. - Array whose index ranges within [0..N-1]. + X - argument + +Result: + logarithm of the absolute value of the Gamma(X). Output parameters: - InvA - inverse of modified matrix A. + SgnGam - sign(Gamma(X)) - -- ALGLIB -- - Copyright 2005 by Bochkanov Sergey +Domain: + 0 < X < 2.55e305 + -2.55e305 < X < 0, X is not an integer. + +ACCURACY: +arithmetic domain # trials peak rms + IEEE 0, 3 28000 5.4e-16 1.1e-16 + IEEE 2.718, 2.556e305 40000 3.5e-16 8.3e-17 +The error criterion was relative when the function magnitude +was greater than one but absolute when it was less than one. + +The following test used the relative error criterion, though +at certain points the relative error could be much higher than +indicated. + IEEE -200, -4 10000 4.8e-16 1.3e-16 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1989, 1992, 2000 by Stephen L. Moshier +Translated to AlgoPascal by Bochkanov Sergey (2005, 2006, 2007). *************************************************************************/ -
    void alglib::rmatrixinvupdaterow( - real_2d_array& inva, - ae_int_t n, - ae_int_t updrow, - real_1d_array v); +
    double alglib::lngamma( + double x, + double& sgngam, + const xparams _params = alglib::xdefault);
    - + + +
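gammafunction() and lngamma() can be checked against the factorial identity Gamma(n+1) = n!; the sign output of lngamma() only becomes interesting for negative non-integer arguments, where Gamma changes sign. A sketch assuming specialfunctions.h, not an official example.

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Gamma(5) = 4! = 24, and lngamma(10) = ln(9!) = ln(362880).
    //
    double g = gammafunction(5.0);
    printf("%.4f\n", g); // EXPECTED: 24.0000

    double sgngam;
    double lg = lngamma(10.0, sgngam);
    printf("%.4f %.0f\n", lg, sgngam); // EXPECTED: approximately 12.8018 1

    //
    // For negative non-integer x the sign output matters:
    // Gamma(-0.5) = -2*sqrt(pi), so SgnGam = -1 and exp(lg)*sgngam recovers the value.
    //
    lg = lngamma(-0.5, sgngam);
    printf("%.4f %.0f\n", exp(lg)*sgngam, sgngam); // EXPECTED: approximately -3.5449 -1
    return 0;
}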
     
    /************************************************************************* -Inverse matrix update by the Sherman-Morrison formula +Returns Gauss and Gauss-Kronrod nodes/weights for Gauss-Jacobi +quadrature on [-1,1] with weight function -The algorithm updates matrix A^-1 when adding a number to an element -of matrix A. + W(x)=Power(1-x,Alpha)*Power(1+x,Beta). -Input parameters: - InvA - inverse of matrix A. - Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - UpdRow - row where the element to be updated is stored. - UpdColumn - column where the element to be updated is stored. - UpdVal - a number to be added to the element. +INPUT PARAMETERS: + N - number of Kronrod nodes, must be odd number, >=3. + Alpha - power-law coefficient, Alpha>-1 + Beta - power-law coefficient, Beta>-1 +OUTPUT PARAMETERS: + Info - error code: + * -5 no real and positive Gauss-Kronrod formula can + be created for such a weight function with a + given number of nodes. + * -4 an error was detected when calculating + weights/nodes. Alpha or Beta are too close + to -1 to obtain weights/nodes with high enough + accuracy, or, may be, N is too large. Try to + use multiple precision version. + * -3 internal eigenproblem solver hasn't converged + * -1 incorrect N was passed + * +1 OK + * +2 OK, but quadrature rule have exterior nodes, + x[0]<-1 or x[n-1]>+1 + X - array[0..N-1] - array of quadrature nodes, ordered in + ascending order. + WKronrod - array[0..N-1] - Kronrod weights + WGauss - array[0..N-1] - Gauss weights (interleaved with zeros + corresponding to extended Kronrod nodes). -Output parameters: - InvA - inverse of modified matrix A. -- ALGLIB -- - Copyright 2005 by Bochkanov Sergey + Copyright 12.05.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixinvupdatesimple( - real_2d_array& inva, +
    void alglib::gkqgenerategaussjacobi( ae_int_t n, - ae_int_t updrow, - ae_int_t updcolumn, - double updval); + double alpha, + double beta, + ae_int_t& info, + real_1d_array& x, + real_1d_array& wkronrod, + real_1d_array& wgauss, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Inverse matrix update by the Sherman-Morrison formula +Returns Gauss and Gauss-Kronrod nodes/weights for Gauss-Legendre +quadrature with N points. -The algorithm computes the inverse of matrix A+u*v’ by using the given matrix -A^-1 and the vectors u and v. +GKQLegendreCalc (calculation) or GKQLegendreTbl (precomputed table) is +used depending on machine precision and number of nodes. -Input parameters: - InvA - inverse of matrix A. - Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - U - the vector modifying the matrix. - Array whose index ranges within [0..N-1]. - V - the vector modifying the matrix. - Array whose index ranges within [0..N-1]. +INPUT PARAMETERS: + N - number of Kronrod nodes, must be odd number, >=3. + +OUTPUT PARAMETERS: + Info - error code: + * -4 an error was detected when calculating + weights/nodes. N is too large to obtain + weights/nodes with high enough accuracy. + Try to use multiple precision version. + * -3 internal eigenproblem solver hasn't converged + * -1 incorrect N was passed + * +1 OK + X - array[0..N-1] - array of quadrature nodes, ordered in + ascending order. + WKronrod - array[0..N-1] - Kronrod weights + WGauss - array[0..N-1] - Gauss weights (interleaved with zeros + corresponding to extended Kronrod nodes). -Output parameters: - InvA - inverse of matrix A + u*v'. -- ALGLIB -- - Copyright 2005 by Bochkanov Sergey + Copyright 12.05.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixinvupdateuv( - real_2d_array& inva, +
    void alglib::gkqgenerategausslegendre( ae_int_t n, - real_1d_array u, - real_1d_array v); + ae_int_t& info, + real_1d_array& x, + real_1d_array& wkronrod, + real_1d_array& wgauss, + const xparams _params = alglib::xdefault);
    - -
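A Gauss-Kronrod pair is normally used to obtain an integral estimate and an error estimate from a single set of function values: the Kronrod weights give the fine result, the embedded Gauss weights (interleaved with zeros) give the coarse one, and their difference serves as the error indicator. The sketch below assumes integration.h and applies the 15-point rule to exp(x) on [-1,1], whose exact integral is e - 1/e; it is not an official example.

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "integration.h"

using namespace alglib;

int main(int argc, char **argv)
{
    ae_int_t info;
    real_1d_array x, wk, wg;

    //
    // 15-point Gauss-Kronrod rule on [-1,1]; wg has zeros at the Kronrod-only nodes.
    //
    gkqgenerategausslegendre(15, info, x, wk, wg);
    if( info<=0 )
        return 1;

    double ik = 0, ig = 0;
    for(int i=0; i<15; i++)
    {
        ik += wk[i]*exp(x[i]);   // Kronrod (fine) estimate
        ig += wg[i]*exp(x[i]);   // embedded Gauss (coarse) estimate
    }
    printf("%.10f\n", ik);           // EXPECTED: approximately 2.3504023873 (= e - 1/e)
    printf("%.2e\n", fabs(ik-ig));   // error indicator, tiny for such a smooth integrand
    return 0;
}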
    - -jacobianellipticfunctions
    - - -
    - +
     
    /************************************************************************* -Jacobian Elliptic Functions - -Evaluates the Jacobian elliptic functions sn(u|m), cn(u|m), -and dn(u|m) of parameter m between 0 and 1, and real -argument u. +Computation of nodes and weights of a Gauss-Kronrod quadrature formula -These functions are periodic, with quarter-period on the -real axis equal to the complete elliptic integral -ellpk(1.0-m). +The algorithm generates the N-point Gauss-Kronrod quadrature formula with +weight function given by coefficients alpha and beta of a recurrence +relation which generates a system of orthogonal polynomials: -Relation to incomplete elliptic integral: -If u = ellik(phi,m), then sn(u|m) = sin(phi), -and cn(u|m) = cos(phi). Phi is called the amplitude of u. + P-1(x) = 0 + P0(x) = 1 + Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) -Computation is by means of the arithmetic-geometric mean -algorithm, except when m is within 1e-9 of 0 or 1. In the -latter case with m close to 1, the approximation applies -only for phi < pi/2. +and zero moment Mu0 -ACCURACY: + Mu0 = integral(W(x)dx,a,b) -Tested at random points with u between 0 and 10, m between -0 and 1. - Absolute error (* = relative error): -arithmetic function # trials peak rms - IEEE phi 10000 9.2e-16* 1.4e-16* - IEEE sn 50000 4.1e-15 4.6e-16 - IEEE cn 40000 3.6e-15 4.4e-16 - IEEE dn 10000 1.3e-12 1.8e-14 +INPUT PARAMETERS: + Alpha - alpha coefficients, array[0..floor(3*K/2)]. + Beta - beta coefficients, array[0..ceil(3*K/2)]. + Beta[0] is not used and may be arbitrary. + Beta[I]>0. + Mu0 - zeroth moment of the weight function. + N - number of nodes of the Gauss-Kronrod quadrature formula, + N >= 3, + N = 2*K+1. - Peak error observed in consistency check using addition -theorem for sn(u+v) was 4e-16 (absolute). Also tested by -the above relation to the incomplete elliptic integral. -Accuracy deteriorates when u is large. +OUTPUT PARAMETERS: + Info - error code: + * -5 no real and positive Gauss-Kronrod formula can + be created for such a weight function with a + given number of nodes. + * -4 N is too large, task may be ill conditioned - + x[i]=x[i+1] found. + * -3 internal eigenproblem solver hasn't converged + * -2 Beta[i]<=0 + * -1 incorrect N was passed + * +1 OK + X - array[0..N-1] - array of quadrature nodes, + in ascending order. + WKronrod - array[0..N-1] - Kronrod weights + WGauss - array[0..N-1] - Gauss weights (interleaved with zeros + corresponding to extended Kronrod nodes). -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 2000 by Stephen L. Moshier + -- ALGLIB -- + Copyright 08.05.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::jacobianellipticfunctions( - double u, - double m, - double& sn, - double& cn, - double& dn, - double& ph); +
    void alglib::gkqgeneraterec( + real_1d_array alpha, + real_1d_array beta, + double mu0, + ae_int_t n, + ae_int_t& info, + real_1d_array& x, + real_1d_array& wkronrod, + real_1d_array& wgauss, + const xparams _params = alglib::xdefault);
    - -
    - -jarqueberatest
    - - -
    - +
     
    /************************************************************************* -Jarque-Bera test +Returns Gauss and Gauss-Kronrod nodes for quadrature with N points. -This test checks hypotheses about the fact that a given sample X is a -sample of normal random variable. +Reduction to tridiagonal eigenproblem is used. -Requirements: - * the number of elements in the sample is not less than 5. +INPUT PARAMETERS: + N - number of Kronrod nodes, must be odd number, >=3. -Input parameters: - X - sample. Array whose index goes from 0 to N-1. - N - size of the sample. N>=5 +OUTPUT PARAMETERS: + Info - error code: + * -4 an error was detected when calculating + weights/nodes. N is too large to obtain + weights/nodes with high enough accuracy. + Try to use multiple precision version. + * -3 internal eigenproblem solver hasn't converged + * -1 incorrect N was passed + * +1 OK + X - array[0..N-1] - array of quadrature nodes, ordered in + ascending order. + WKronrod - array[0..N-1] - Kronrod weights + WGauss - array[0..N-1] - Gauss weights (interleaved with zeros + corresponding to extended Kronrod nodes). -Output parameters: - P - p-value for the test + -- ALGLIB -- + Copyright 12.05.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::gkqlegendrecalc( + ae_int_t n, + ae_int_t& info, + real_1d_array& x, + real_1d_array& wkronrod, + real_1d_array& wgauss, + const xparams _params = alglib::xdefault); -Accuracy of the approximation used (5<=N<=1951): +
    + +
    +
    /************************************************************************* +Returns Gauss and Gauss-Kronrod nodes for quadrature with N points using +pre-calculated table. Nodes/weights were computed with accuracy up to +1.0E-32 (if MPFR version of ALGLIB is used). In standard double precision +accuracy reduces to something about 2.0E-16 (depending on your compiler's +handling of long floating point constants). -p-value relative error (5<=N<=1951) -[1, 0.1] < 1% -[0.1, 0.01] < 2% -[0.01, 0.001] < 6% -[0.001, 0] wasn't measured +INPUT PARAMETERS: + N - number of Kronrod nodes. + N can be 15, 21, 31, 41, 51, 61. + +OUTPUT PARAMETERS: + X - array[0..N-1] - array of quadrature nodes, ordered in + ascending order. + WKronrod - array[0..N-1] - Kronrod weights + WGauss - array[0..N-1] - Gauss weights (interleaved with zeros + corresponding to extended Kronrod nodes). -For N>1951 accuracy wasn't measured but it shouldn't be sharply different -from table values. -- ALGLIB -- - Copyright 09.04.2007 by Bochkanov Sergey + Copyright 12.05.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::jarqueberatest(real_1d_array x, ae_int_t n, double& p); +
    void alglib::gkqlegendretbl( + ae_int_t n, + real_1d_array& x, + real_1d_array& wkronrod, + real_1d_array& wgauss, + double& eps, + const xparams _params = alglib::xdefault);
    - + - +
     
    /************************************************************************* -Calculation of the value of the Laguerre polynomial. +Returns nodes/weights for Gauss-Hermite quadrature on (-inf,+inf) with +weight function W(x)=Exp(-x*x) -Parameters: - n - degree, n>=0 - x - argument +INPUT PARAMETERS: + N - number of nodes, >=1 -Result: - the value of the Laguerre polynomial Ln at x +OUTPUT PARAMETERS: + Info - error code: + * -4 an error was detected when calculating + weights/nodes. May be, N is too large. Try to + use multiple precision version. + * -3 internal eigenproblem solver hasn't converged + * -1 incorrect N/Alpha was passed + * +1 OK + X - array[0..N-1] - array of quadrature nodes, + in ascending order. + W - array[0..N-1] - array of quadrature weights. + + + -- ALGLIB -- + Copyright 12.05.2009 by Bochkanov Sergey *************************************************************************/ -
    double alglib::laguerrecalculate(ae_int_t n, double x); +
    void alglib::gqgenerategausshermite( + ae_int_t n, + ae_int_t& info, + real_1d_array& x, + real_1d_array& w, + const xparams _params = alglib::xdefault);
    - +
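The Gauss-Hermite weights already contain the factor exp(-x*x), so the plain sum of w[i]*f(x[i]) approximates the integral of f(x)*exp(-x*x) over the whole real line; with f identically equal to 1 the result is sqrt(pi), which gives a compact self-check. A sketch assuming integration.h, not an official example.

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "integration.h"

using namespace alglib;

int main(int argc, char **argv)
{
    ae_int_t info;
    real_1d_array x, w;

    //
    // 10-node Gauss-Hermite rule: integral of exp(-x^2) over (-inf,+inf) equals sqrt(pi).
    //
    gqgenerategausshermite(10, info, x, w);
    if( info<=0 )
        return 1;

    double s = 0;
    for(int i=0; i<10; i++)
        s += w[i]*1.0;           // f(x)=1, so the quadrature just sums the weights
    printf("%.10f\n", s);        // EXPECTED: approximately 1.7724538509 (= sqrt(pi))
    return 0;
}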
     
    /************************************************************************* -Representation of Ln as C[0] + C[1]*X + ... + C[N]*X^N +Returns nodes/weights for Gauss-Jacobi quadrature on [-1,1] with weight +function W(x)=Power(1-x,Alpha)*Power(1+x,Beta). -Input parameters: - N - polynomial degree, n>=0 +INPUT PARAMETERS: + N - number of nodes, >=1 + Alpha - power-law coefficient, Alpha>-1 + Beta - power-law coefficient, Beta>-1 -Output parameters: - C - coefficients +OUTPUT PARAMETERS: + Info - error code: + * -4 an error was detected when calculating + weights/nodes. Alpha or Beta are too close + to -1 to obtain weights/nodes with high enough + accuracy, or, may be, N is too large. Try to + use multiple precision version. + * -3 internal eigenproblem solver hasn't converged + * -1 incorrect N/Alpha/Beta was passed + * +1 OK + X - array[0..N-1] - array of quadrature nodes, + in ascending order. + W - array[0..N-1] - array of quadrature weights. + + + -- ALGLIB -- + Copyright 12.05.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::laguerrecoefficients(ae_int_t n, real_1d_array& c); +
    void alglib::gqgenerategaussjacobi( + ae_int_t n, + double alpha, + double beta, + ae_int_t& info, + real_1d_array& x, + real_1d_array& w, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Summation of Laguerre polynomials using Clenshaw’s recurrence formula. +Returns nodes/weights for Gauss-Laguerre quadrature on [0,+inf) with +weight function W(x)=Power(x,Alpha)*Exp(-x) -This routine calculates c[0]*L0(x) + c[1]*L1(x) + ... + c[N]*LN(x) +INPUT PARAMETERS: + N - number of nodes, >=1 + Alpha - power-law coefficient, Alpha>-1 -Parameters: - n - degree, n>=0 - x - argument +OUTPUT PARAMETERS: + Info - error code: + * -4 an error was detected when calculating + weights/nodes. Alpha is too close to -1 to + obtain weights/nodes with high enough accuracy + or, may be, N is too large. Try to use + multiple precision version. + * -3 internal eigenproblem solver hasn't converged + * -1 incorrect N/Alpha was passed + * +1 OK + X - array[0..N-1] - array of quadrature nodes, + in ascending order. + W - array[0..N-1] - array of quadrature weights. -Result: - the value of the Laguerre polynomial at x + + -- ALGLIB -- + Copyright 12.05.2009 by Bochkanov Sergey *************************************************************************/ -
    double alglib::laguerresum(real_1d_array c, ae_int_t n, double x); +
    void alglib::gqgenerategausslaguerre( + ae_int_t n, + double alpha, + ae_int_t& info, + real_1d_array& x, + real_1d_array& w, + const xparams _params = alglib::xdefault);
    - -
    - -fisherlda
    -fisherldan
    - - -
    - +
     
    /************************************************************************* -Multiclass Fisher LDA - -Subroutine finds coefficients of linear combination which optimally separates -training set on classes. - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. Best results are achieved for high-dimensional problems - ! (NVars is at least 256). - ! - ! Multithreading is used to accelerate initial phase of LDA, which - ! includes calculation of products of large matrices. Again, for best - ! efficiency problem must be high-dimensional. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +Returns nodes/weights for Gauss-Legendre quadrature on [-1,1] with N +nodes. INPUT PARAMETERS: - XY - training set, array[0..NPoints-1,0..NVars]. - First NVars columns store values of independent - variables, next column stores number of class (from 0 - to NClasses-1) which dataset element belongs to. Fractional - values are rounded to nearest integer. - NPoints - training set size, NPoints>=0 - NVars - number of independent variables, NVars>=1 - NClasses - number of classes, NClasses>=2 - + N - number of nodes, >=1 OUTPUT PARAMETERS: - Info - return code: - * -4, if internal EVD subroutine hasn't converged - * -2, if there is a point with class number - outside of [0..NClasses-1]. - * -1, if incorrect parameters was passed (NPoints<0, - NVars<1, NClasses<2) - * 1, if task has been solved - * 2, if there was a multicollinearity in training set, - but task has been solved. - W - linear combination coefficients, array[0..NVars-1] + Info - error code: + * -4 an error was detected when calculating + weights/nodes. N is too large to obtain + weights/nodes with high enough accuracy. + Try to use multiple precision version. + * -3 internal eigenproblem solver hasn't converged + * -1 incorrect N was passed + * +1 OK + X - array[0..N-1] - array of quadrature nodes, + in ascending order. + W - array[0..N-1] - array of quadrature weights. + -- ALGLIB -- - Copyright 31.05.2008 by Bochkanov Sergey + Copyright 12.05.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::fisherlda( - real_2d_array xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, +
    void alglib::gqgenerategausslegendre( + ae_int_t n, ae_int_t& info, - real_1d_array& w); + real_1d_array& x, + real_1d_array& w, + const xparams _params = alglib::xdefault);
    - +
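The nodes are returned for [-1,1]; to integrate over a general interval [a,b] one maps each node through t = (a+b)/2 + (b-a)/2*x and scales the weights by (b-a)/2. The sketch below, assuming integration.h and not taken from the manual, integrates sin(t) over [0,pi], whose exact value is 2.

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "integration.h"

using namespace alglib;

int main(int argc, char **argv)
{
    ae_int_t info;
    real_1d_array x, w;

    //
    // 8-node Gauss-Legendre rule on [-1,1], mapped to [0,pi].
    //
    gqgenerategausslegendre(8, info, x, w);
    if( info<=0 )
        return 1;

    double a = 0, b = 3.14159265358979323846, s = 0;
    for(int i=0; i<8; i++)
    {
        double t = 0.5*(a+b) + 0.5*(b-a)*x[i]; // map node from [-1,1] to [a,b]
        s += 0.5*(b-a)*w[i]*sin(t);            // scale weight by (b-a)/2
    }
    printf("%.10f\n", s); // EXPECTED: approximately 2.0000000000
    return 0;
}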
     
    /************************************************************************* -N-dimensional multiclass Fisher LDA +Computation of nodes and weights for a Gauss-Lobatto quadrature formula -Subroutine finds coefficients of linear combinations which optimally separates -training set on classes. It returns N-dimensional basis whose vector are sorted -by quality of training set separation (in descending order). +The algorithm generates the N-point Gauss-Lobatto quadrature formula with +weight function given by coefficients alpha and beta of a recurrence which +generates a system of orthogonal polynomials. -COMMERCIAL EDITION OF ALGLIB: +P-1(x) = 0 +P0(x) = 1 +Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. Best results are achieved for high-dimensional problems - ! (NVars is at least 256). - ! - ! Multithreading is used to accelerate initial phase of LDA, which - ! includes calculation of products of large matrices. Again, for best - ! efficiency problem must be high-dimensional. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +and zeroth moment Mu0 -INPUT PARAMETERS: - XY - training set, array[0..NPoints-1,0..NVars]. - First NVars columns store values of independent - variables, next column stores number of class (from 0 - to NClasses-1) which dataset element belongs to. Fractional - values are rounded to nearest integer. - NPoints - training set size, NPoints>=0 - NVars - number of independent variables, NVars>=1 - NClasses - number of classes, NClasses>=2 +Mu0 = integral(W(x)dx,a,b) +INPUT PARAMETERS: + Alpha - array[0..N-2], alpha coefficients + Beta - array[0..N-2], beta coefficients. + Zero-indexed element is not used, may be arbitrary. + Beta[I]>0 + Mu0 - zeroth moment of the weighting function. + A - left boundary of the integration interval. + B - right boundary of the integration interval. + N - number of nodes of the quadrature formula, N>=3 + (including the left and right boundary nodes). OUTPUT PARAMETERS: - Info - return code: - * -4, if internal EVD subroutine hasn't converged - * -2, if there is a point with class number - outside of [0..NClasses-1]. - * -1, if incorrect parameters was passed (NPoints<0, - NVars<1, NClasses<2) - * 1, if task has been solved - * 2, if there was a multicollinearity in training set, - but task has been solved. - W - basis, array[0..NVars-1,0..NVars-1] - columns of matrix stores basis vectors, sorted by - quality of training set separation (in descending order) + Info - error code: + * -3 internal eigenproblem solver hasn't converged + * -2 Beta[i]<=0 + * -1 incorrect N was passed + * 1 OK + X - array[0..N-1] - array of quadrature nodes, + in ascending order. + W - array[0..N-1] - array of quadrature weights. 
-- ALGLIB -- - Copyright 31.05.2008 by Bochkanov Sergey + Copyright 2005-2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::fisherldan( - real_2d_array xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, +
    void alglib::gqgenerategausslobattorec( + real_1d_array alpha, + real_1d_array beta, + double mu0, + double a, + double b, + ae_int_t n, ae_int_t& info, - real_2d_array& w); -void alglib::smp_fisherldan( - real_2d_array xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, + real_1d_array& x, + real_1d_array& w, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Computation of nodes and weights for a Gauss-Radau quadrature formula + +The algorithm generates the N-point Gauss-Radau quadrature formula with +weight function given by the coefficients alpha and beta of a recurrence +which generates a system of orthogonal polynomials. + +P-1(x) = 0 +P0(x) = 1 +Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) + +and zeroth moment Mu0 + +Mu0 = integral(W(x)dx,a,b) + +INPUT PARAMETERS: + Alpha - array[0..N-2], alpha coefficients. + Beta - array[0..N-1], beta coefficients + Zero-indexed element is not used. + Beta[I]>0 + Mu0 - zeroth moment of the weighting function. + A - left boundary of the integration interval. + N - number of nodes of the quadrature formula, N>=2 + (including the left boundary node). + +OUTPUT PARAMETERS: + Info - error code: + * -3 internal eigenproblem solver hasn't converged + * -2 Beta[i]<=0 + * -1 incorrect N was passed + * 1 OK + X - array[0..N-1] - array of quadrature nodes, + in ascending order. + W - array[0..N-1] - array of quadrature weights. + + + -- ALGLIB -- + Copyright 2005-2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::gqgenerategaussradaurec( + real_1d_array alpha, + real_1d_array beta, + double mu0, + double a, + ae_int_t n, ae_int_t& info, - real_2d_array& w); + real_1d_array& x, + real_1d_array& w, + const xparams _params = alglib::xdefault);
    - + +
    +
    /************************************************************************* +Computation of nodes and weights for a Gauss quadrature formula + +The algorithm generates the N-point Gauss quadrature formula with weight +function given by coefficients alpha and beta of a recurrence relation +which generates a system of orthogonal polynomials: + +P-1(x) = 0 +P0(x) = 1 +Pn+1(x) = (x-alpha(n))*Pn(x) - beta(n)*Pn-1(x) + +and zeroth moment Mu0 + +Mu0 = integral(W(x)dx,a,b) + +INPUT PARAMETERS: + Alpha - array[0..N-1], alpha coefficients + Beta - array[0..N-1], beta coefficients + Zero-indexed element is not used and may be arbitrary. + Beta[I]>0. + Mu0 - zeroth moment of the weight function. + N - number of nodes of the quadrature formula, N>=1 + +OUTPUT PARAMETERS: + Info - error code: + * -3 internal eigenproblem solver hasn't converged + * -2 Beta[i]<=0 + * -1 incorrect N was passed + * 1 OK + X - array[0..N-1] - array of quadrature nodes, + in ascending order. + W - array[0..N-1] - array of quadrature weights. + + -- ALGLIB -- + Copyright 2005-2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::gqgeneraterec( + real_1d_array alpha, + real_1d_array beta, + double mu0, + ae_int_t n, + ae_int_t& info, + real_1d_array& x, + real_1d_array& w, + const xparams _params = alglib::xdefault); + +
    + - +
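To illustrate the recurrence-based interface: the monic Legendre polynomials on [-1,1] with weight W(x)=1 satisfy alpha(k)=0, beta(k)=k^2/(4*k^2-1) and have zeroth moment mu0=2, so feeding these coefficients into gqgeneraterec() should reproduce the ordinary Gauss-Legendre rule. This sketch assumes integration.h and is not an official example; beta[0] is unused and is simply set to a positive value.

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "integration.h"

using namespace alglib;

int main(int argc, char **argv)
{
    const int n = 5;
    real_1d_array alpha, beta;
    alpha.setlength(n);
    beta.setlength(n);

    //
    // Monic Legendre recurrence on [-1,1] with weight W(x)=1:
    //   alpha(k)=0, beta(k)=k^2/(4k^2-1), mu0 = integral of W = 2.
    //
    for(int k=0; k<n; k++)
    {
        alpha[k] = 0.0;
        beta[k]  = (k==0) ? 2.0 : (double)(k*k)/(4.0*k*k-1.0); // beta[0] is not used
    }

    ae_int_t info;
    real_1d_array x, w;
    gqgeneraterec(alpha, beta, 2.0, n, info, x, w);
    if( info<=0 )
        return 1;

    //
    // The 5-point rule is exact for polynomials up to degree 9:
    // integral of x^4 over [-1,1] equals 2/5.
    //
    double s = 0;
    for(int i=0; i<n; i++)
        s += w[i]*pow(x[i], 4);
    printf("%.10f\n", s); // EXPECTED: approximately 0.4000000000
    return 0;
}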
     
    /************************************************************************* -Calculation of the value of the Legendre polynomial Pn. +Calculation of the value of the Hermite polynomial. Parameters: n - degree, n>=0 x - argument Result: - the value of the Legendre polynomial Pn at x + the value of the Hermite polynomial Hn at x *************************************************************************/ -
    double alglib::legendrecalculate(ae_int_t n, double x); +
    double alglib::hermitecalculate( + ae_int_t n, + double x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Representation of Pn as C[0] + C[1]*X + ... + C[N]*X^N +Representation of Hn as C[0] + C[1]*X + ... + C[N]*X^N Input parameters: N - polynomial degree, n>=0 @@ -14033,1049 +14742,1124 @@ Output parameters: C - coefficients *************************************************************************/ -
    void alglib::legendrecoefficients(ae_int_t n, real_1d_array& c); +
    void alglib::hermitecoefficients( + ae_int_t n, + real_1d_array& c, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Summation of Legendre polynomials using Clenshaw’s recurrence formula. +Summation of Hermite polynomials using Clenshaw's recurrence formula. This routine calculates - c[0]*P0(x) + c[1]*P1(x) + ... + c[N]*PN(x) + c[0]*H0(x) + c[1]*H1(x) + ... + c[N]*HN(x) Parameters: n - degree, n>=0 x - argument Result: - the value of the Legendre polynomial at x + the value of the Hermite polynomial at x *************************************************************************/ -
    double alglib::legendresum(real_1d_array c, ae_int_t n, double x); +
    double alglib::hermitesum( + real_1d_array c, + ae_int_t n, + double x, + const xparams _params = alglib::xdefault);
    - + - +
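The three Hermite routines are mutually consistent regardless of the normalization convention: hermitesum() with the unit coefficient vector c = e_n must reproduce hermitecalculate(n, x), and the power-basis coefficients from hermitecoefficients() must evaluate to the same number. The sketch below checks this for n=3 at x=0.5; it assumes specialfunctions.h and is not an official example.

#include "stdafx.h"
#include <stdio.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    const int n = 3;
    const double x = 0.5;

    //
    // Direct evaluation of H3(x).
    //
    double direct = hermitecalculate(n, x);

    //
    // Clenshaw sum with c = (0,0,0,1) picks out the same H3(x).
    //
    real_1d_array c = "[0,0,0,1]";
    double viasum = hermitesum(c, n, x);

    //
    // Power-basis coefficients of H3: evaluate C[0]+C[1]*x+...+C[3]*x^3.
    //
    real_1d_array p;
    hermitecoefficients(n, p);
    double viacoef = 0, xp = 1;
    for(int i=0; i<=n; i++)
    {
        viacoef += p[i]*xp;
        xp *= x;
    }

    printf("%.6f %.6f %.6f\n", direct, viasum, viacoef); // all three values should agree
    return 0;
}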
     
    /************************************************************************* +Portable high quality random number generator state. +Initialized with HQRNDRandomize() or HQRNDSeed(). +Fields: + S1, S2 - seed values + V - precomputed value + MagicV - 'magic' value used to determine whether State structure + was correctly initialized. *************************************************************************/ -
    class lincgreport +
    class hqrndstate { - ae_int_t iterationscount; - ae_int_t nmv; - ae_int_t terminationtype; - double r2; };
    - +
     
    /************************************************************************* -This object stores state of the linear CG method. +This function generates random number from continuous distribution given +by finite sample X. -You should use ALGLIB functions to work with this object. -Never try to access its fields directly! +INPUT PARAMETERS + State - high quality random number generator, must be + initialized with HQRNDRandomize() or HQRNDSeed(). + X - finite sample, array[N] (can be larger, in this case only + leading N elements are used). THIS ARRAY MUST BE SORTED BY + ASCENDING. + N - number of elements to use, N>=1 + +RESULT + this function returns random number from continuous distribution which + tries to approximate X as mush as possible. min(X)<=Result<=max(X). + + -- ALGLIB -- + Copyright 08.11.2011 by Bochkanov Sergey *************************************************************************/ -
    class lincgstate -{ -}; +
    double alglib::hqrndcontinuous( + hqrndstate state, + real_1d_array x, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function initializes linear CG Solver. This solver is used to solve -symmetric positive definite problems. If you want to solve nonsymmetric -(or non-positive definite) problem you may use LinLSQR solver provided by -ALGLIB. - -USAGE: -1. User initializes algorithm state with LinCGCreate() call -2. User tunes solver parameters with LinCGSetCond() and other functions -3. Optionally, user sets starting point with LinCGSetStartingPoint() -4. User calls LinCGSolveSparse() function which takes algorithm state and - SparseMatrix object. -5. User calls LinCGResults() to get solution -6. Optionally, user may call LinCGSolveSparse() again to solve another - problem with different matrix and/or right part without reinitializing - LinCGState structure. +This function generates random number from discrete distribution given by +finite sample X. -INPUT PARAMETERS: - N - problem dimension, N>0 +INPUT PARAMETERS + State - high quality random number generator, must be + initialized with HQRNDRandomize() or HQRNDSeed(). + X - finite sample + N - number of elements to use, N>=1 -OUTPUT PARAMETERS: - State - structure which stores algorithm state +RESULT + this function returns one of the X[i] for random i=0..N-1 -- ALGLIB -- - Copyright 14.11.2011 by Bochkanov Sergey + Copyright 08.11.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lincgcreate(ae_int_t n, lincgstate& state); +
    double alglib::hqrnddiscrete( + hqrndstate state, + real_1d_array x, + ae_int_t n, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -CG-solver: results. - -This function must be called after LinCGSolve - -INPUT PARAMETERS: - State - algorithm state +Random number generator: exponential distribution -OUTPUT PARAMETERS: - X - array[N], solution - Rep - optimization report: - * Rep.TerminationType completetion code: - * -5 input matrix is either not positive definite, - too large or too small - * -4 overflow/underflow during solution - (ill conditioned problem) - * 1 ||residual||<=EpsF*||b|| - * 5 MaxIts steps was taken - * 7 rounding errors prevent further progress, - best point found is returned - * Rep.IterationsCount contains iterations count - * NMV countains number of matrix-vector calculations +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). -- ALGLIB -- - Copyright 14.11.2011 by Bochkanov Sergey + Copyright 11.08.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lincgresults( - lincgstate state, - real_1d_array& x, - lincgreport& rep); +
    double alglib::hqrndexponential( + hqrndstate state, + double lambdav, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function sets stopping criteria. - -INPUT PARAMETERS: - EpsF - algorithm will be stopped if norm of residual is less than - EpsF*||b||. - MaxIts - algorithm will be stopped if number of iterations is more - than MaxIts. +Random number generator: normal numbers -OUTPUT PARAMETERS: - State - structure which stores algorithm state +This function generates one random number from normal distribution. +Its performance is equal to that of HQRNDNormal2() -NOTES: -If both EpsF and MaxIts are zero then small EpsF will be set to small -value. +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). -- ALGLIB -- - Copyright 14.11.2011 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lincgsetcond(lincgstate state, double epsf, ae_int_t maxits); +
    double alglib::hqrndnormal( + hqrndstate state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function changes preconditioning settings of LinCGSolveSparse() -function. LinCGSolveSparse() will use diagonal of the system matrix as -preconditioner. This preconditioning mode is active by default. +Random number generator: normal numbers -INPUT PARAMETERS: - State - structure which stores algorithm state +This function generates two independent random numbers from normal +distribution. Its performance is equal to that of HQRNDNormal() + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). -- ALGLIB -- - Copyright 19.11.2012 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lincgsetprecdiag(lincgstate state); +
    void alglib::hqrndnormal2( + hqrndstate state, + double& x1, + double& x2, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function changes preconditioning settings of LinCGSolveSparse() -function. By default, SolveSparse() uses diagonal preconditioner, but if -you want to use solver without preconditioning, you can call this function -which forces solver to use unit matrix for preconditioning. - -INPUT PARAMETERS: - State - structure which stores algorithm state +HQRNDState initialization with random values which come from standard +RNG. -- ALGLIB -- - Copyright 19.11.2012 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lincgsetprecunit(lincgstate state); +
    void alglib::hqrndrandomize( + hqrndstate& state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets restart frequency. By default, algorithm is restarted -after N subsequent iterations. +HQRNDState initialization with seed values -- ALGLIB -- - Copyright 14.11.2011 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lincgsetrestartfreq(lincgstate state, ae_int_t srf); +
    void alglib::hqrndseed( + ae_int_t s1, + ae_int_t s2, + hqrndstate& state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets frequency of residual recalculations. - -Algorithm updates residual r_k using iterative formula, but recalculates -it from scratch after each 10 iterations. It is done to avoid accumulation -of numerical errors and to stop algorithm when r_k starts to grow. - -Such low update frequence (1/10) gives very little overhead, but makes -algorithm a bit more robust against numerical errors. However, you may -change it +This function generates random integer number in [0, N) -INPUT PARAMETERS: - Freq - desired update frequency, Freq>=0. - Zero value means that no updates will be done. +1. State structure must be initialized with HQRNDRandomize() or HQRNDSeed() +2. N can be any positive number except for very large numbers: + * close to 2^31 on 32-bit systems + * close to 2^62 on 64-bit systems + An exception will be generated if N is too large. -- ALGLIB -- - Copyright 14.11.2011 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lincgsetrupdatefreq(lincgstate state, ae_int_t freq); +
    ae_int_t alglib::hqrnduniformi( + hqrndstate state, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +
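A small sketch for hqrnduniformi() (editor-added; alglibmisc.h assumed). Since the result lies in [0,N), adding 1 to hqrnduniformi(state,6) simulates a six-sided die:

#include <stdio.h>
#include "alglibmisc.h"   // assumed header for the hqrnd unit

using namespace alglib;

int main(int argc, char **argv)
{
    hqrndstate state;
    hqrndseed(11, 17, state);

    // hqrnduniformi(state,6) returns 0..5, so +1 gives a die roll 1..6.
    for(int i=0; i<5; i++)
        printf("%d ", int(hqrnduniformi(state, 6)+1));
    printf("\n");
    return 0;
}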
     
    /************************************************************************* -This function sets starting point. -By default, zero starting point is used. - -INPUT PARAMETERS: - X - starting point, array[N] +This function generates random real number in (0,1), +not including interval boundaries -OUTPUT PARAMETERS: - State - structure which stores algorithm state +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). -- ALGLIB -- - Copyright 14.11.2011 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lincgsetstartingpoint(lincgstate state, real_1d_array x); +
    double alglib::hqrnduniformr( + hqrndstate state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function turns on/off reporting. - -INPUT PARAMETERS: - State - structure which stores algorithm state - NeedXRep- whether iteration reports are needed or not +Random number generator: random X and Y such that X^2+Y^2=1 -If NeedXRep is True, algorithm will call rep() callback function if it is -provided to MinCGOptimize(). +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). -- ALGLIB -- - Copyright 14.11.2011 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lincgsetxrep(lincgstate state, bool needxrep); +
    void alglib::hqrndunit2( + hqrndstate state, + double& x, + double& y, + const xparams _params = alglib::xdefault);
    - + +
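A sketch combining hqrndunit2() and hqrnduniformr() (editor-added; alglibmisc.h assumed). The returned pair must lie on the unit circle, and the uniform real must fall strictly inside (0,1):

#include <stdio.h>
#include <math.h>
#include "alglibmisc.h"   // assumed header for the hqrnd unit

using namespace alglib;

int main(int argc, char **argv)
{
    hqrndstate state;
    hqrndseed(42, 43, state);

    // Point on the unit circle: x^2+y^2 equals 1 up to rounding error.
    double x, y;
    hqrndunit2(state, x, y);
    printf("%.6f\n", x*x+y*y); // EXPECTED: 1.000000

    // Uniform real from the open interval (0,1).
    double u = hqrnduniformr(state);
    printf("%d\n", int(u>0.0 && u<1.0)); // EXPECTED: 1
    return 0;
}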
    + +incompletebeta
    +invincompletebeta
    + + +
    +
     
    /************************************************************************* -Procedure for solution of A*x=b with sparse A. +Incomplete beta integral -INPUT PARAMETERS: - State - algorithm state - A - sparse matrix in the CRS format (you MUST contvert it to - CRS format by calling SparseConvertToCRS() function). - IsUpper - whether upper or lower triangle of A is used: - * IsUpper=True => only upper triangle is used and lower - triangle is not referenced at all - * IsUpper=False => only lower triangle is used and upper - triangle is not referenced at all - B - right part, array[N] +Returns incomplete beta integral of the arguments, evaluated +from zero to x. The function is defined as -RESULT: - This function returns no result. - You can get solution by calling LinCGResults() + x + - - + | (a+b) | | a-1 b-1 + ----------- | t (1-t) dt. + - - | | + | (a) | (b) - + 0 -NOTE: this function uses lightweight preconditioning - multiplication by - inverse of diag(A). If you want, you can turn preconditioning off by - calling LinCGSetPrecUnit(). However, preconditioning cost is low and - preconditioner is very important for solution of badly scaled - problems. +The domain of definition is 0 <= x <= 1. In this +implementation a and b are restricted to positive values. +The integral from x to 1 may be obtained by the symmetry +relation - -- ALGLIB -- - Copyright 14.11.2011 by Bochkanov Sergey + 1 - incbet( a, b, x ) = incbet( b, a, 1-x ). + +The integral is evaluated by a continued fraction expansion +or, when b*x is small, by a power series. + +ACCURACY: + +Tested at uniformly distributed random points (a,b,x) with a and b +in "domain" and x between 0 and 1. + Relative error +arithmetic domain # trials peak rms + IEEE 0,5 10000 6.9e-15 4.5e-16 + IEEE 0,85 250000 2.2e-13 1.7e-14 + IEEE 0,1000 30000 5.3e-12 6.3e-13 + IEEE 0,10000 250000 9.3e-11 7.1e-12 + IEEE 0,100000 10000 8.7e-10 4.8e-11 +Outputs smaller than the IEEE gradual underflow threshold +were excluded from these statistics. + +Cephes Math Library, Release 2.8: June, 2000 +Copyright 1984, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::lincgsolvesparse( - lincgstate state, - sparsematrix a, - bool isupper, - real_1d_array b); +
    double alglib::incompletebeta( + double a, + double b, + double x, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
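An editor-added check of the symmetry relation quoted above (assuming the function lives in specialfunctions.h, as in the stock ALGLIB C++ distribution):

#include <stdio.h>
#include <math.h>
#include "specialfunctions.h"   // assumed header for incompletebeta()

using namespace alglib;

int main(int argc, char **argv)
{
    // 1 - incbet(a,b,x) should equal incbet(b,a,1-x).
    double a = 2.5, b = 4.0, x = 0.3;
    double lhs = 1.0 - incompletebeta(a, b, x);
    double rhs = incompletebeta(b, a, 1.0-x);
    printf("%d\n", int(fabs(lhs-rhs) < 1.0e-12)); // EXPECTED: 1
    return 0;
}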
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "solvers.h"
    +
    /************************************************************************* +Inverse of imcomplete beta integral -using namespace alglib; +Given y, the function finds x such that + incbet( a, b, x ) = y . -int main(int argc, char **argv) -{ - // - // This example illustrates solution of sparse linear systems with - // conjugate gradient method. - // - // Suppose that we have linear system A*x=b with sparse symmetric - // positive definite A (represented by sparsematrix object) - // [ 5 1 ] - // [ 1 7 2 ] - // A = [ 2 8 1 ] - // [ 1 4 1 ] - // [ 1 4 ] - // and right part b - // [ 7 ] - // [ 17 ] - // b = [ 14 ] - // [ 10 ] - // [ 6 ] - // and we want to solve this system using sparse linear CG. In order - // to do so, we have to create left part (sparsematrix object) and - // right part (dense array). - // - // Initially, sparse matrix is created in the Hash-Table format, - // which allows easy initialization, but do not allow matrix to be - // used in the linear solvers. So after construction you should convert - // sparse matrix to CRS format (one suited for linear operations). - // - // It is important to note that in our example we initialize full - // matrix A, both lower and upper triangles. However, it is symmetric - // and sparse solver needs just one half of the matrix. So you may - // save about half of the space by filling only one of the triangles. - // - sparsematrix a; - sparsecreate(5, 5, a); - sparseset(a, 0, 0, 5.0); - sparseset(a, 0, 1, 1.0); - sparseset(a, 1, 0, 1.0); - sparseset(a, 1, 1, 7.0); - sparseset(a, 1, 2, 2.0); - sparseset(a, 2, 1, 2.0); - sparseset(a, 2, 2, 8.0); - sparseset(a, 2, 3, 1.0); - sparseset(a, 3, 2, 1.0); - sparseset(a, 3, 3, 4.0); - sparseset(a, 3, 4, 1.0); - sparseset(a, 4, 3, 1.0); - sparseset(a, 4, 4, 4.0); - - // - // Now our matrix is fully initialized, but we have to do one more - // step - convert it from Hash-Table format to CRS format (see - // documentation on sparse matrices for more information about these - // formats). - // - // If you omit this call, ALGLIB will generate exception on the first - // attempt to use A in linear operations. - // - sparseconverttocrs(a); +The routine performs interval halving or Newton iterations to find the +root of incbet(a,b,x) - y = 0. - // - // Initialization of the right part - // - real_1d_array b = "[7,17,14,10,6]"; - // - // Now we have to create linear solver object and to use it for the - // solution of the linear system. - // - // NOTE: lincgsolvesparse() accepts additional parameter which tells - // what triangle of the symmetric matrix should be used - upper - // or lower. Because we've filled both parts of the matrix, we - // can use any part - upper or lower. - // - lincgstate s; - lincgreport rep; - real_1d_array x; - lincgcreate(5, s); - lincgsolvesparse(s, a, true, b); - lincgresults(s, x, rep); +ACCURACY: - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [1.000,2.000,1.000,2.000,1.000] - return 0; -} + Relative error: + x a,b +arithmetic domain domain # trials peak rms + IEEE 0,1 .5,10000 50000 5.8e-12 1.3e-13 + IEEE 0,1 .25,100 100000 1.8e-13 3.9e-15 + IEEE 0,1 0,5 50000 1.1e-12 5.5e-15 +With a and b constrained to half-integer or integer values: + IEEE 0,1 .5,10000 50000 5.8e-12 1.1e-13 + IEEE 0,1 .5,100 100000 1.7e-14 7.9e-16 +With a = .5, b constrained to half-integer or integer values: + IEEE 0,1 .5,10000 10000 8.3e-11 1.0e-11 +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1996, 2000 by Stephen L. 
Moshier +*************************************************************************/ +
    double alglib::invincompletebeta( + double a, + double b, + double y, + const xparams _params = alglib::xdefault); -
    + + - + +
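An editor-added round-trip sketch (specialfunctions.h assumed): since invincompletebeta() inverts incompletebeta() in its third argument, the round trip x -> y -> x should reproduce x:

#include <stdio.h>
#include <math.h>
#include "specialfunctions.h"   // assumed header for invincompletebeta()

using namespace alglib;

int main(int argc, char **argv)
{
    double a = 3.0, b = 5.0, x = 0.42;
    double y  = incompletebeta(a, b, x);     // forward
    double x2 = invincompletebeta(a, b, y);  // inverse
    printf("%d\n", int(fabs(x-x2) < 1.0e-10)); // EXPECTED: 1
    return 0;
}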
    +
    /************************************************************************* +Builder object used to generate IDW (Inverse Distance Weighting) model. +*************************************************************************/ +
    class idwbuilder +{ +}; + +
    +
     
    /************************************************************************* +Buffer object which is used to perform evaluation requests in the +multithreaded mode (multiple threads working with same IDW object). +This object should be created with idwcreatecalcbuffer(). *************************************************************************/ -
    class linlsqrreport +
    class idwcalcbuffer { - ae_int_t iterationscount; - ae_int_t nmv; - ae_int_t terminationtype; };
    - +
     
    /************************************************************************* -This object stores state of the LinLSQR method. +IDW (Inverse Distance Weighting) model object. +*************************************************************************/ +
    class idwmodel +{ +}; -You should use ALGLIB functions to work with this object. +
    + +
    +
    /************************************************************************* +IDW fitting report: + rmserror RMS error + avgerror average error + maxerror maximum error + r2 coefficient of determination, R-squared, 1-RSS/TSS *************************************************************************/ -
    class linlsqrstate +
    class idwreport { + double rmserror; + double avgerror; + double maxerror; + double r2; };
    - +
     
    /************************************************************************* -This function initializes linear LSQR Solver. This solver is used to solve -non-symmetric (and, possibly, non-square) problems. Least squares solution -is returned for non-compatible systems. +This subroutine creates builder object used to generate IDW model from +irregularly sampled (scattered) dataset. Multidimensional scalar/vector- +-valued are supported. -USAGE: -1. User initializes algorithm state with LinLSQRCreate() call -2. User tunes solver parameters with LinLSQRSetCond() and other functions -3. User calls LinLSQRSolveSparse() function which takes algorithm state - and SparseMatrix object. -4. User calls LinLSQRResults() to get solution -5. Optionally, user may call LinLSQRSolveSparse() again to solve another - problem with different matrix and/or right part without reinitializing - LinLSQRState structure. +Builder object is used to fit model to data as follows: +* builder object is created with idwbuildercreate() function +* dataset is added with idwbuildersetpoints() function +* one of the modern IDW algorithms is chosen with either: + * idwbuildersetalgomstab() - Multilayer STABilized algorithm (interpolation) + Alternatively, one of the textbook algorithms can be chosen (not recommended): + * idwbuildersetalgotextbookshepard() - textbook Shepard algorithm + * idwbuildersetalgotextbookmodshepard()-textbook modified Shepard algorithm +* finally, model construction is performed with idwfit() function. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - M - number of rows in A - N - number of variables, N>0 + NX - dimensionality of the argument, NX>=1 + NY - dimensionality of the function being modeled, NY>=1; + NY=1 corresponds to classic scalar function, NY>=1 corresponds + to vector-valued function. OUTPUT PARAMETERS: - State - structure which stores algorithm state + State- builder object - -- ALGLIB -- - Copyright 30.11.2011 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::linlsqrcreate(ae_int_t m, ae_int_t n, linlsqrstate& state); +
    void alglib::idwbuildercreate( + ae_int_t nx, + ae_int_t ny, + idwbuilder& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -LSQR solver: results. +This function sets IDW model construction algorithm to the Multilayer +Stabilized IDW method (IDW-MSTAB), a latest incarnation of the inverse +distance weighting interpolation which fixes shortcomings of the original +and modified Shepard's variants. + +The distinctive features of IDW-MSTAB are: +1) exact interpolation is pursued (as opposed to fitting and noise + suppression) +2) improved robustness when compared with that of other algorithms: + * MSTAB shows almost no strange fitting artifacts like ripples and + sharp spikes (unlike N-dimensional splines and HRBFs) + * MSTAB does not return function values far from the interval spanned + by the dataset; say, if all your points have |f|<=1, you can be sure + that model value won't deviate too much from [-1,+1] +3) good model construction time competing with that of HRBFs and bicubic + splines +4) ability to work with any number of dimensions, starting from NX=1 + +The drawbacks of IDW-MSTAB (and all IDW algorithms in general) are: +1) dependence of the model evaluation time on the search radius +2) bad extrapolation properties, models built by this method are usually + conservative in their predictions + +Thus, IDW-MSTAB is a good "default" option if you want to perform +scattered multidimensional interpolation. Although it has its drawbacks, +it is easy to use and robust, which makes it a good first step. -This function must be called after LinLSQRSolve INPUT PARAMETERS: - State - algorithm state + State - builder object + SRad - initial search radius, SRad>0 is required. A model value + is obtained by "smart" averaging of the dataset points + within search radius. -OUTPUT PARAMETERS: - X - array[N], solution - Rep - optimization report: - * Rep.TerminationType completetion code: - * 1 ||Rk||<=EpsB*||B|| - * 4 ||A^T*Rk||/(||A||*||Rk||)<=EpsA - * 5 MaxIts steps was taken - * 7 rounding errors prevent further progress, - X contains best point found so far. - (sometimes returned on singular systems) - * Rep.IterationsCount contains iterations count - * NMV countains number of matrix-vector calculations +NOTE 1: IDW interpolation can correctly handle ANY dataset, including + datasets with non-distinct points. In case non-distinct points are + found, an average value for this point will be calculated. + +NOTE 2: the memory requirements for model storage are O(NPoints*NLayers). + The model construction needs twice as much memory as model storage. + +NOTE 3: by default 16 IDW layers are built which is enough for most cases. + You can change this parameter with idwbuildersetnlayers() method. + Larger values may be necessary if you need to reproduce extrafine + details at distances smaller than SRad/65536. Smaller value may + be necessary if you have to save memory and computing time, and + ready to sacrifice some model quality. 
+ + +ALGORITHM DESCRIPTION + +ALGLIB implementation of IDW is somewhat similar to the modified Shepard's +method (one with search radius R) but overcomes several of its drawbacks, +namely: +1) a tendency to show stepwise behavior for uniform datasets +2) a tendency to show terrible interpolation properties for highly + nonuniform datasets which often arise in geospatial tasks + (function values are densely sampled across multiple separated + "tracks") + +IDW-MSTAB method performs several passes over dataset and builds a sequence +of progressively refined IDW models (layers), which starts from one with +largest search radius SRad and continues to smaller search radii until +required number of layers is built. Highest layers reproduce global +behavior of the target function at larger distances whilst lower layers +reproduce fine details at smaller distances. + +Each layer is an IDW model built with following modifications: +* weights go to zero when distance approach to the current search radius +* an additional regularizing term is added to the distance: w=1/(d^2+lambda) +* an additional fictional term with unit weight and zero function value is + added in order to promote continuity properties at the isolated and + boundary points + +By default, 16 layers is built, which is enough for most cases. You can +change this parameter with idwbuildersetnlayers() method. -- ALGLIB -- - Copyright 30.11.2011 by Bochkanov Sergey + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::linlsqrresults( - linlsqrstate state, - real_1d_array& x, - linlsqrreport& rep); +
    void alglib::idwbuildersetalgomstab( + idwbuilder state, + double srad, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -This function sets stopping criteria. +This function sets IDW model construction algorithm to the 'textbook' +modified Shepard's algorithm with user-specified search radius. -INPUT PARAMETERS: - EpsA - algorithm will be stopped if ||A^T*Rk||/(||A||*||Rk||)<=EpsA. - EpsB - algorithm will be stopped if ||Rk||<=EpsB*||B|| - MaxIts - algorithm will be stopped if number of iterations - more than MaxIts. +IMPORTANT: we do NOT recommend using textbook IDW algorithms because they + have terrible interpolation properties. Use MSTAB in all cases. -OUTPUT PARAMETERS: - State - structure which stores algorithm state +INPUT PARAMETERS: + State - builder object + R - search radius -NOTE: if EpsA,EpsB,EpsC and MaxIts are zero then these variables will -be setted as default values. +NOTE 1: IDW interpolation can correctly handle ANY dataset, including + datasets with non-distinct points. In case non-distinct points are + found, an average value for this point will be calculated. -- ALGLIB -- - Copyright 30.11.2011 by Bochkanov Sergey + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::linlsqrsetcond( - linlsqrstate state, - double epsa, - double epsb, - ae_int_t maxits); +
    void alglib::idwbuildersetalgotextbookmodshepard( + idwbuilder state, + double r, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets optional Tikhonov regularization coefficient. -It is zero by default. +This function sets IDW model construction algorithm to the textbook +Shepard's algorithm with custom (user-specified) power parameter. + +IMPORTANT: we do NOT recommend using textbook IDW algorithms because they + have terrible interpolation properties. Use MSTAB in all cases. INPUT PARAMETERS: - LambdaI - regularization factor, LambdaI>=0 + State - builder object + P - power parameter, P>0; good value to start with is 2.0 -OUTPUT PARAMETERS: - State - structure which stores algorithm state +NOTE 1: IDW interpolation can correctly handle ANY dataset, including + datasets with non-distinct points. In case non-distinct points are + found, an average value for this point will be calculated. -- ALGLIB -- - Copyright 30.11.2011 by Bochkanov Sergey + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::linlsqrsetlambdai(linlsqrstate state, double lambdai); +
    void alglib::idwbuildersetalgotextbookshepard( + idwbuilder state, + double p, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function changes preconditioning settings of LinCGSolveSparse() -function. LinCGSolveSparse() will use diagonal of the system matrix as -preconditioner. This preconditioning mode is active by default. +This function sets constant prior term (model value at infinity). + +Constant prior term is determined as mean value over dataset. INPUT PARAMETERS: - State - structure which stores algorithm state + S - spline builder -- ALGLIB -- - Copyright 19.11.2012 by Bochkanov Sergey + Copyright 29.10.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::linlsqrsetprecdiag(linlsqrstate state); +
    void alglib::idwbuildersetconstterm( + idwbuilder state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function changes preconditioning settings of LinLSQQSolveSparse() -function. By default, SolveSparse() uses diagonal preconditioner, but if -you want to use solver without preconditioning, you can call this function -which forces solver to use unit matrix for preconditioning. +This function changes number of layers used by IDW-MSTAB algorithm. + +The more layers you have, the finer details can be reproduced with IDW +model. The less layers you have, the less memory and CPU time is consumed +by the model. + +Memory consumption grows linearly with layers count, running time grows +sub-linearly. + +The default number of layers is 16, which allows you to reproduce details +at distance down to SRad/65536. You will rarely need to change it. INPUT PARAMETERS: - State - structure which stores algorithm state + State - builder object + NLayers - NLayers>=1, the number of layers used by the model. -- ALGLIB -- - Copyright 19.11.2012 by Bochkanov Sergey + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::linlsqrsetprecunit(linlsqrstate state); +
    void alglib::idwbuildersetnlayers( + idwbuilder state, + ae_int_t nlayers, + const xparams _params = alglib::xdefault);
    - +
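An editor-added sketch of tuning the layer count; it reuses the toy dataset from the worked IDW example later in this section and only illustrates where idwbuildersetnlayers() fits into the usual builder workflow:

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Same toy dataset as in the worked IDW example below.
    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";

    idwbuilder builder;
    idwbuildercreate(2, 1, builder);
    idwbuildersetpoints(builder, xy);
    idwbuildersetalgomstab(builder, 5.0);

    // Trade fine-scale resolution for memory: 8 layers instead of the
    // default 16.
    idwbuildersetnlayers(builder, 8);

    idwmodel model;
    idwreport rep;
    idwfit(builder, model, rep);
    printf("%.2f\n", double(idwcalc2(model, 1.0, 0.0))); // should stay close to 3.00
    return 0;
}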
     
    /************************************************************************* -This function turns on/off reporting. +This function adds dataset to the builder object. -INPUT PARAMETERS: - State - structure which stores algorithm state - NeedXRep- whether iteration reports are needed or not +This function overrides results of the previous calls, i.e. multiple calls +of this function will result in only the last set being added. -If NeedXRep is True, algorithm will call rep() callback function if it is -provided to MinCGOptimize(). +INPUT PARAMETERS: + State - builder object + XY - points, array[N,NX+NY]. One row corresponds to one point + in the dataset. First NX elements are coordinates, next + NY elements are function values. Array may be larger than + specified, in this case only leading [N,NX+NY] elements + will be used. + N - number of points in the dataset, N>=0. -- ALGLIB -- - Copyright 30.11.2011 by Bochkanov Sergey + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::linlsqrsetxrep(linlsqrstate state, bool needxrep); +
    void alglib::idwbuildersetpoints( + idwbuilder state, + real_2d_array xy, + const xparams _params = alglib::xdefault); +void alglib::idwbuildersetpoints( + idwbuilder state, + real_2d_array xy, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Procedure for solution of A*x=b with sparse A. +This function sets prior term (model value at infinity) as user-specified +value. INPUT PARAMETERS: - State - algorithm state - A - sparse M*N matrix in the CRS format (you MUST contvert it - to CRS format by calling SparseConvertToCRS() function - BEFORE you pass it to this function). - B - right part, array[M] - -RESULT: - This function returns no result. - You can get solution by calling LinCGResults() + S - spline builder + V - value for user-defined prior -NOTE: this function uses lightweight preconditioning - multiplication by - inverse of diag(A). If you want, you can turn preconditioning off by - calling LinLSQRSetPrecUnit(). However, preconditioning cost is low - and preconditioner is very important for solution of badly scaled - problems. +NOTE: for vector-valued models all components of the prior are set to same + user-specified value -- ALGLIB -- - Copyright 30.11.2011 by Bochkanov Sergey + Copyright 29.10.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::linlsqrsolvesparse( - linlsqrstate state, - sparsematrix a, - real_1d_array b); +
    void alglib::idwbuildersetuserterm( + idwbuilder state, + double v, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "solvers.h"
    +
    /************************************************************************* +This function sets zero prior term (model value at infinity). -using namespace alglib; +INPUT PARAMETERS: + S - spline builder + -- ALGLIB -- + Copyright 29.10.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::idwbuildersetzeroterm( + idwbuilder state, + const xparams _params = alglib::xdefault); -int main(int argc, char **argv) -{ - // - // This example illustrates solution of sparse linear least squares problem - // with LSQR algorithm. - // - // Suppose that we have least squares problem min|A*x-b| with sparse A - // represented by sparsematrix object - // [ 1 1 ] - // [ 1 1 ] - // A = [ 2 1 ] - // [ 1 ] - // [ 1 ] - // and right part b - // [ 4 ] - // [ 2 ] - // b = [ 4 ] - // [ 1 ] - // [ 2 ] - // and we want to solve this system in the least squares sense using - // LSQR algorithm. In order to do so, we have to create left part - // (sparsematrix object) and right part (dense array). - // - // Initially, sparse matrix is created in the Hash-Table format, - // which allows easy initialization, but do not allow matrix to be - // used in the linear solvers. So after construction you should convert - // sparse matrix to CRS format (one suited for linear operations). - // - sparsematrix a; - sparsecreate(5, 2, a); - sparseset(a, 0, 0, 1.0); - sparseset(a, 0, 1, 1.0); - sparseset(a, 1, 0, 1.0); - sparseset(a, 1, 1, 1.0); - sparseset(a, 2, 0, 2.0); - sparseset(a, 2, 1, 1.0); - sparseset(a, 3, 0, 1.0); - sparseset(a, 4, 1, 1.0); +
    + +
    +
    /************************************************************************* +This function calculates values of the IDW model at the given point. - // - // Now our matrix is fully initialized, but we have to do one more - // step - convert it from Hash-Table format to CRS format (see - // documentation on sparse matrices for more information about these - // formats). - // - // If you omit this call, ALGLIB will generate exception on the first - // attempt to use A in linear operations. - // - sparseconverttocrs(a); +This is general function which can be used for arbitrary NX (dimension of +the space of arguments) and NY (dimension of the function itself). However +when you have NY=1 you may find more convenient to use idwcalc1(), +idwcalc2() or idwcalc3(). - // - // Initialization of the right part - // - real_1d_array b = "[4,2,4,1,2]"; +NOTE: this function modifies internal temporaries of the IDW model, thus + IT IS NOT THREAD-SAFE! If you want to perform parallel model + evaluation from the multiple threads, use idwtscalcbuf() with per- + thread buffer object. - // - // Now we have to create linear solver object and to use it for the - // solution of the linear system. - // - linlsqrstate s; - linlsqrreport rep; - real_1d_array x; - linlsqrcreate(5, 2, s); - linlsqrsolvesparse(s, a, b); - linlsqrresults(s, x, rep); +INPUT PARAMETERS: + S - IDW model + X - coordinates, array[NX]. X may have more than NX elements, + in this case only leading NX will be used. - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4 - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [1.000,2.000] - return 0; -} +OUTPUT PARAMETERS: + Y - function value, array[NY]. Y is out-parameter and will be + reallocated after call to this function. In case you want + to reuse previously allocated Y, you may use idwcalcbuf(), + which reallocates Y only when it is too small. + -- ALGLIB -- + Copyright 22.10.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::idwcalc( + idwmodel s, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault); -
    -
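The worked examples in this section use idwcalc2(); the sketch below (editor-added) shows the generic vector form, where X carries NX coordinates and Y receives NY values:

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Build the same toy 2D scalar model as in the worked examples below.
    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    idwbuilder builder;
    idwmodel model;
    idwreport rep;
    idwbuildercreate(2, 1, builder);
    idwbuildersetpoints(builder, xy);
    idwbuildersetalgomstab(builder, 5.0);
    idwfit(builder, model, rep);

    // Generic evaluation: NX=2 coordinates in, NY=1 value out.
    real_1d_array x = "[1.0,0.0]";
    real_1d_array y;
    idwcalc(model, x, y);
    printf("%.2f\n", double(y[0])); // EXPECTED: 3.00
    return 0;
}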
    - -linearmodel
    -lrreport
    - -lravgerror
    -lravgrelerror
    -lrbuild
    -lrbuilds
    -lrbuildz
    -lrbuildzs
    -lrpack
    -lrprocess
    -lrrmserror
    -lrunpack
    - - - -
    linreg_d_basic Linear regression used to build the very basic model and unpack coefficients
    - + +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* +IDW interpolation: scalar target, 1-dimensional argument + +NOTE: this function modifies internal temporaries of the IDW model, thus + IT IS NOT THREAD-SAFE! If you want to perform parallel model + evaluation from the multiple threads, use idwtscalcbuf() with per- + thread buffer object. + +INPUT PARAMETERS: + S - IDW interpolant built with IDW builder + X0 - argument value + +Result: + IDW interpolant S(X0) + -- ALGLIB -- + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    class linearmodel -{ -}; +
    double alglib::idwcalc1( + idwmodel s, + double x0, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -LRReport structure contains additional information about linear model: -* C - covariation matrix, array[0..NVars,0..NVars]. - C[i,j] = Cov(A[i],A[j]) -* RMSError - root mean square error on a training set -* AvgError - average error on a training set -* AvgRelError - average relative error on a training set (excluding - observations with zero function value). -* CVRMSError - leave-one-out cross-validation estimate of - generalization error. Calculated using fast algorithm - with O(NVars*NPoints) complexity. -* CVAvgError - cross-validation estimate of average error -* CVAvgRelError - cross-validation estimate of average relative error +IDW interpolation: scalar target, 2-dimensional argument -All other fields of the structure are intended for internal use and should -not be used outside ALGLIB. +NOTE: this function modifies internal temporaries of the IDW model, thus + IT IS NOT THREAD-SAFE! If you want to perform parallel model + evaluation from the multiple threads, use idwtscalcbuf() with per- + thread buffer object. + +INPUT PARAMETERS: + S - IDW interpolant built with IDW builder + X0, X1 - argument value + +Result: + IDW interpolant S(X0,X1) + + -- ALGLIB -- + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    class lrreport -{ - real_2d_array c; - double rmserror; - double avgerror; - double avgrelerror; - double cvrmserror; - double cvavgerror; - double cvavgrelerror; - ae_int_t ncvdefects; - integer_1d_array cvdefects; -}; +
    double alglib::idwcalc2( + idwmodel s, + double x0, + double x1, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Average error on the test set +IDW interpolation: scalar target, 3-dimensional argument + +NOTE: this function modifies internal temporaries of the IDW model, thus + IT IS NOT THREAD-SAFE! If you want to perform parallel model + evaluation from the multiple threads, use idwtscalcbuf() with per- + thread buffer object. INPUT PARAMETERS: - LM - linear model - XY - test set - NPoints - test set size + S - IDW interpolant built with IDW builder + X0,X1,X2- argument value -RESULT: - average error. +Result: + IDW interpolant S(X0,X1,X2) -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    double alglib::lravgerror( - linearmodel lm, - real_2d_array xy, - ae_int_t npoints); +
    double alglib::idwcalc3( + idwmodel s, + double x0, + double x1, + double x2, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
/************************************************************************* -RMS error on the test set +This function calculates values of the IDW model at the given point. + +Same as idwcalc(), but does not reallocate Y when it is large enough to +store function values. + +NOTE: this function modifies internal temporaries of the IDW model, thus + IT IS NOT THREAD-SAFE! If you want to perform parallel model + evaluation from the multiple threads, use idwtscalcbuf() with per- + thread buffer object. INPUT PARAMETERS: - LM - linear model - XY - test set - NPoints - test set size + S - IDW model + X - coordinates, array[NX]. X may have more than NX elements, + in this case only leading NX will be used. + Y - possibly preallocated array -RESULT: - average relative error. +OUTPUT PARAMETERS: + Y - function value, array[NY]. Y is not reallocated when it + is larger than NY. -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    double alglib::lravgrelerror( - linearmodel lm, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::idwcalcbuf( + idwmodel s, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Linear regression +This function creates buffer structure which can be used to perform +parallel IDW model evaluations (with one IDW model instance being +used from multiple threads, as long as different threads use different +instances of buffer). -Subroutine builds model: +This buffer object can be used with idwtscalcbuf() function (here "ts" +stands for "thread-safe", "buf" is a suffix which denotes function which +reuses previously allocated output space). - Y = A(0)*X[0] + ... + A(N-1)*X[N-1] + A(N) +How to use it: +* create IDW model structure or load it from file +* call idwcreatecalcbuffer(), once per thread working with IDW model (you + should call this function only AFTER model initialization, see below for + more information) +* call idwtscalcbuf() from different threads, with each thread working + with its own copy of buffer object. -and model found in ALGLIB format, covariation matrix, training set errors -(rms, average, average relative) and leave-one-out cross-validation -estimate of the generalization error. CV estimate calculated using fast -algorithm with O(NPoints*NVars) complexity. +INPUT PARAMETERS + S - IDW model -When covariation matrix is calculated standard deviations of function -values are assumed to be equal to RMS error on the training set. +OUTPUT PARAMETERS + Buf - external buffer. -INPUT PARAMETERS: - XY - training set, array [0..NPoints-1,0..NVars]: - * NVars columns - independent variables - * last column - dependent variable - NPoints - training set size, NPoints>NVars+1 - NVars - number of independent variables -OUTPUT PARAMETERS: - Info - return code: - * -255, in case of unknown internal error - * -4, if internal SVD subroutine haven't converged - * -1, if incorrect parameters was passed (NPoints<NVars+2, NVars<1). - * 1, if subroutine successfully finished - LM - linear model in the ALGLIB format. Use subroutines of - this unit to work with the model. - AR - additional results +IMPORTANT: buffer object should be used only with IDW model object which + was used to initialize buffer. Any attempt to use buffer with + different object is dangerous - you may get memory violation + error because sizes of internal arrays do not fit to dimensions + of the IDW structure. +IMPORTANT: you should call this function only for model which was built + with model builder (or unserialized from file). Sizes of some + internal structures are determined only after model is built, + so buffer object created before model construction stage will + be useless (and any attempt to use it will result in exception). -- ALGLIB -- - Copyright 02.08.2008 by Bochkanov Sergey + Copyright 22.10.2018 by Sergey Bochkanov *************************************************************************/ -
    void alglib::lrbuild( - real_2d_array xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t& info, - linearmodel& lm, - lrreport& ar); +
    void alglib::idwcreatecalcbuffer( + idwmodel s, + idwcalcbuffer& buf, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Linear regression - -Variant of LRBuild which uses vector of standatd deviations (errors in -function values). +This function fits IDW model to the dataset using current IDW construction +algorithm. A model being built and fitting report are returned. INPUT PARAMETERS: - XY - training set, array [0..NPoints-1,0..NVars]: - * NVars columns - independent variables - * last column - dependent variable - S - standard deviations (errors in function values) - array[0..NPoints-1], S[i]>0. - NPoints - training set size, NPoints>NVars+1 - NVars - number of independent variables + State - builder object OUTPUT PARAMETERS: - Info - return code: - * -255, in case of unknown internal error - * -4, if internal SVD subroutine haven't converged - * -1, if incorrect parameters was passed (NPoints<NVars+2, NVars<1). - * -2, if S[I]<=0 - * 1, if subroutine successfully finished - LM - linear model in the ALGLIB format. Use subroutines of - this unit to work with the model. - AR - additional results + Model - an IDW model built with current algorithm + Rep - model fitting report, fields of this structure contain + information about average fitting errors. +NOTE: although IDW-MSTAB algorithm is an interpolation method, i.e. it + tries to fit the model exactly, it can handle datasets with non- + distinct points which can not be fit exactly; in such cases least- + squares fitting is performed. -- ALGLIB -- - Copyright 02.08.2008 by Bochkanov Sergey + Copyright 22.10.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lrbuilds( - real_2d_array xy, - real_1d_array s, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t& info, - linearmodel& lm, - lrreport& ar); +
    void alglib::idwfit( + idwbuilder state, + idwmodel& model, + idwreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Like LRBuild but builds model - - Y = A(0)*X[0] + ... + A(N-1)*X[N-1] - -i.e. with zero constant term. +This function serializes data structure to string. - -- ALGLIB -- - Copyright 30.10.2008 by Bochkanov Sergey +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. + But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. *************************************************************************/ -
    void alglib::lrbuildz( - real_2d_array xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t& info, - linearmodel& lm, - lrreport& ar); - +
    void idwserialize(idwmodel &obj, std::string &s_out); +void idwserialize(idwmodel &obj, std::ostream &s_out);
    - +
     
    /************************************************************************* -Like LRBuildS, but builds model - - Y = A(0)*X[0] + ... + A(N-1)*X[N-1] - -i.e. with zero constant term. - - -- ALGLIB -- - Copyright 30.10.2008 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::lrbuildzs( - real_2d_array xy, - real_1d_array s, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t& info, - linearmodel& lm, - lrreport& ar); +This function calculates values of the IDW model at the given point, using +external buffer object (internal temporaries of IDW model are not +modified). -
    - -
    -
    /************************************************************************* -"Packs" coefficients and creates linear model in ALGLIB format (LRUnpack -reversed). +This function allows to use same IDW model object in different threads, +assuming that different threads use different instances of the buffer +structure. INPUT PARAMETERS: - V - coefficients, array[0..NVars] - NVars - number of independent variables + S - IDW model, may be shared between different threads + Buf - buffer object created for this particular instance of IDW + model with idwcreatecalcbuffer(). + X - coordinates, array[NX]. X may have more than NX elements, + in this case only leading NX will be used. + Y - possibly preallocated array -OUTPUT PAREMETERS: - LM - linear model. +OUTPUT PARAMETERS: + Y - function value, array[NY]. Y is not reallocated when it + is larger than NY. -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lrpack(real_1d_array v, ae_int_t nvars, linearmodel& lm); +
    void alglib::idwtscalcbuf( + idwmodel s, + idwcalcbuffer buf, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +
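An editor-added sketch of multithreaded evaluation with per-thread buffers, using std::thread; the worker function and its name are illustrative, not part of ALGLIB. Buffers are created only after idwfit(), as required above:

#include <stdio.h>
#include <thread>
#include "interpolation.h"

using namespace alglib;

// Each thread owns its buffer; the model itself is shared read-only.
static void worker(const idwmodel &model, idwcalcbuffer &buf, double x0, double *out)
{
    real_1d_array x, y;
    x.setlength(2);
    x[0] = x0;
    x[1] = 0.0;
    idwtscalcbuf(model, buf, x, y);
    *out = y[0];
}

int main(int argc, char **argv)
{
    // Build a small 2D scalar model (same dataset as the worked examples).
    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    idwbuilder builder;
    idwmodel model;
    idwreport rep;
    idwbuildercreate(2, 1, builder);
    idwbuildersetpoints(builder, xy);
    idwbuildersetalgomstab(builder, 5.0);
    idwfit(builder, model, rep);

    // One buffer per thread, created AFTER the model is built.
    idwcalcbuffer buf0, buf1;
    idwcreatecalcbuffer(model, buf0);
    idwcreatecalcbuffer(model, buf1);

    double r0 = 0, r1 = 0;
    std::thread t0(worker, std::cref(model), std::ref(buf0), -1.0, &r0);
    std::thread t1(worker, std::cref(model), std::ref(buf1), +1.0, &r1);
    t0.join();
    t1.join();
    printf("%.2f %.2f\n", r0, r1); // values at the two dataset points: 2.00 3.00
    return 0;
}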
     
    /************************************************************************* -Procesing - -INPUT PARAMETERS: - LM - linear model - X - input vector, array[0..NVars-1]. - -Result: - value of linear model regression estimate - - -- ALGLIB -- - Copyright 03.09.2008 by Bochkanov Sergey +This function unserializes data structure from string. *************************************************************************/ -
    double alglib::lrprocess(linearmodel lm, real_1d_array x); - +
    void idwunserialize(const std::string &s_in, idwmodel &obj); +void idwunserialize(const std::istream &s_in, idwmodel &obj);
    - +
    -
    /************************************************************************* -RMS error on the test set +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -INPUT PARAMETERS: - LM - linear model - XY - test set - NPoints - test set size +using namespace alglib; -RESULT: - root mean square error. - -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::lrrmserror( - linearmodel lm, - real_2d_array xy, - ae_int_t npoints); +int main(int argc, char **argv) +{ + // + // This example illustrates basic concepts of the IDW models: + // creation and evaluation. + // + // Suppose that we have set of 2-dimensional points with associated + // scalar function values, and we want to build an IDW model using + // our data. + // + // NOTE: we can work with N-dimensional models and vector-valued functions too :) + // + // Typical sequence of steps is given below: + // 1. we create IDW builder object + // 2. we attach our dataset to the IDW builder and tune algorithm settings + // 3. we generate IDW model + // 4. we use IDW model instance (evaluate, serialize, etc.) + // + double v; -
    - -
    -
    /************************************************************************* -Unpacks coefficients of linear model. + // + // Step 1: IDW builder creation. + // + // We have to specify dimensionality of the space (2 or 3) and + // dimensionality of the function (scalar or vector). + // + // New builder object is empty - it has not dataset and uses + // default model construction settings + // + idwbuilder builder; + idwbuildercreate(2, 1, builder); -INPUT PARAMETERS: - LM - linear model in ALGLIB format + // + // Step 2: dataset addition + // + // XY contains two points - x0=(-1,0) and x1=(+1,0) - + // and two function values f(x0)=2, f(x1)=3. + // + real_2d_array xy = "[[-1,0,2],[+1,0,3]]"; + idwbuildersetpoints(builder, xy); -OUTPUT PARAMETERS: - V - coefficients, array[0..NVars] - constant term (intercept) is stored in the V[NVars]. - NVars - number of independent variables (one less than number - of coefficients) + // + // Step 3: choose IDW algorithm and generate model + // + // We use modified stabilized IDW algorithm with following parameters: + // * SRad - set to 5.0 (search radius must be large enough) + // + // IDW-MSTAB algorithm is a state-of-the-art implementation of IDW which + // is competitive with RBFs and bicubic splines. See comments on the + // idwbuildersetalgomstab() function for more information. + // + idwmodel model; + idwreport rep; + idwbuildersetalgomstab(builder, 5.0); + idwfit(builder, model, rep); - -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::lrunpack(linearmodel lm, real_1d_array& v, ae_int_t& nvars); + // + // Step 4: model was built, evaluate its value + // + v = idwcalc2(model, 1.0, 0.0); + printf("%.2f\n", double(v)); // EXPECTED: 3.000 + return 0; +} -
    -

    Examples:   [1]  

    - + +
     #include "stdafx.h"
     #include <stdlib.h>
     #include <stdio.h>
     #include <math.h>
    -#include "dataanalysis.h"
    +#include "interpolation.h"
     
     using namespace alglib;
     
    @@ -15083,3675 +15867,3522 @@
     int main(int argc, char **argv)
     {
         //
    -    // In this example we demonstrate linear fitting by f(x|a) = a*exp(0.5*x).
    +    // This example shows how to serialize and unserialize IDW model.
    +    // 
    +    // Suppose that we have set of 2-dimensional points with associated
    +    // scalar function values, and we have built an IDW model using
    +    // our data.
         //
    -    // We have:
    -    // * xy - matrix of basic function values (exp(0.5*x)) and expected values
    +    // This model can be serialized to string or stream. ALGLIB supports
    +    // flexible (un)serialization, i.e. you can move serialized model
    +    // representation between different machines (32-bit or 64-bit),
    +    // different CPU architectures (x86/64, ARM) or even different
    +    // programming languages supported by ALGLIB (C#, C++, ...).
         //
    -    real_2d_array xy = "[[0.606531,1.133719],[0.670320,1.306522],[0.740818,1.504604],[0.818731,1.554663],[0.904837,1.884638],[1.000000,2.072436],[1.105171,2.257285],[1.221403,2.534068],[1.349859,2.622017],[1.491825,2.897713],[1.648721,3.219371]]";
    -    ae_int_t info;
    -    ae_int_t nvars;
    -    linearmodel model;
    -    lrreport rep;
    -    real_1d_array c;
    +    // Our first step is to build model, evaluate it at point (1,0),
    +    // and serialize it to string.
    +    //
    +    std::string s;
    +    double v;
    +    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    +    idwbuilder builder;
    +    idwmodel model;
    +    idwmodel model2;
    +    idwreport rep;
    +    idwbuildercreate(2, 1, builder);
    +    idwbuildersetpoints(builder, xy);
    +    idwbuildersetalgomstab(builder, 5.0);
    +    idwfit(builder, model, rep);
    +    v = idwcalc2(model, 1.0, 0.0);
    +    printf("%.2f\n", double(v)); // EXPECTED: 3.000
     
    -    lrbuildz(xy, 11, 1, info, model, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 1
    -    lrunpack(model, c, nvars);
    -    printf("%s\n", c.tostring(4).c_str()); // EXPECTED: [1.98650,0.00000]
    +    //
    +    // Serialization + unserialization to a different instance
    +    // of the model class.
    +    //
    +    alglib::idwserialize(model, s);
    +    alglib::idwunserialize(s, model2);
    +
    +    //
    +    // Evaluate unserialized model at the same point
    +    //
    +    v = idwcalc2(model2, 1.0, 0.0);
    +    printf("%.2f\n", double(v)); // EXPECTED: 3.000
         return 0;
     }
     
     
    -
    + - +
     
    /************************************************************************* +Incomplete gamma integral + +The function is defined by + + x + - + 1 | | -t a-1 + igam(a,x) = ----- | e t dt. + - | | + | (a) - + 0 + + +In this implementation both arguments must be positive. +The integral is evaluated by either a power series or +continued fraction expansion, depending on the relative +values of a and x. +ACCURACY: + + Relative error: +arithmetic domain # trials peak rms + IEEE 0,30 200000 3.6e-14 2.9e-15 + IEEE 0,100 300000 9.9e-14 1.5e-14 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1985, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    class logitmodel -{ -}; +
    double alglib::incompletegamma( + double a, + double x, + const xparams _params = alglib::xdefault);
    - +
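An editor-added sanity check (specialfunctions.h assumed): for a=1 the normalized integral reduces to the closed form igam(1,x) = 1 - exp(-x):

#include <stdio.h>
#include <math.h>
#include "specialfunctions.h"   // assumed header for incompletegamma()

using namespace alglib;

int main(int argc, char **argv)
{
    double x = 0.7;
    double v = incompletegamma(1.0, x);
    printf("%d\n", int(fabs(v - (1.0 - exp(-x))) < 1.0e-12)); // EXPECTED: 1
    return 0;
}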
     
    /************************************************************************* -MNLReport structure contains information about training process: -* NGrad - number of gradient calculations -* NHess - number of Hessian calculations +Complemented incomplete gamma integral + +The function is defined by + + + igamc(a,x) = 1 - igam(a,x) + + inf. + - + 1 | | -t a-1 + = ----- | e t dt. + - | | + | (a) - + x + + +In this implementation both arguments must be positive. +The integral is evaluated by either a power series or +continued fraction expansion, depending on the relative +values of a and x. + +ACCURACY: + +Tested at random a, x. + a x Relative error: +arithmetic domain domain # trials peak rms + IEEE 0.5,100 0,100 200000 1.9e-14 1.7e-15 + IEEE 0.01,0.5 0,100 200000 1.4e-13 1.6e-15 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1985, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    class mnlreport -{ - ae_int_t ngrad; - ae_int_t nhess; -}; +
    double alglib::incompletegammac( + double a, + double x, + const xparams _params = alglib::xdefault);
    - +
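An editor-added cross-check of the complement identity igamc(a,x) = 1 - igam(a,x) (specialfunctions.h assumed):

#include <stdio.h>
#include <math.h>
#include "specialfunctions.h"   // assumed header for incompletegammac()

using namespace alglib;

int main(int argc, char **argv)
{
    double a = 4.5, x = 3.0;
    double s = incompletegamma(a, x) + incompletegammac(a, x);
    printf("%d\n", int(fabs(s - 1.0) < 1.0e-12)); // EXPECTED: 1
    return 0;
}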
     
    /************************************************************************* -Average cross-entropy (in bits per element) on the test set +Inverse of complemented imcomplete gamma integral -INPUT PARAMETERS: - LM - logit model - XY - test set - NPoints - test set size +Given p, the function finds x such that -RESULT: - CrossEntropy/(NPoints*ln(2)). + igamc( a, x ) = p. - -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey +Starting with the approximate value + + 3 + x = a t + + where + + t = 1 - d - ndtri(p) sqrt(d) + +and + + d = 1/9a, + +the routine performs up to 10 Newton iterations to find the +root of igamc(a,x) - p = 0. + +ACCURACY: + +Tested at random a, p in the intervals indicated. + + a p Relative error: +arithmetic domain domain # trials peak rms + IEEE 0.5,100 0,0.5 100000 1.0e-14 1.7e-15 + IEEE 0.01,0.5 0,0.5 100000 9.0e-14 3.4e-15 + IEEE 0.5,10000 0,0.5 20000 2.3e-13 3.8e-14 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::mnlavgce( - logitmodel lm, - real_2d_array xy, - ae_int_t npoints); +
    double alglib::invincompletegammac( + double a, + double y0, + const xparams _params = alglib::xdefault);
    - + + +
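An editor-added round-trip sketch (specialfunctions.h assumed): invincompletegammac() solves igamc(a,x) = p for x, so feeding the output of incompletegammac() back in recovers x:

#include <stdio.h>
#include <math.h>
#include "specialfunctions.h"   // assumed header for invincompletegammac()

using namespace alglib;

int main(int argc, char **argv)
{
    double a = 2.0, x = 1.3;
    double p  = incompletegammac(a, x);
    double x2 = invincompletegammac(a, p);
    printf("%d\n", int(fabs(x-x2) < 1.0e-10)); // EXPECTED: 1
    return 0;
}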
     
    /************************************************************************* -Average error on the test set +This function is left for backward compatibility. +Use fitspheremc() instead. -INPUT PARAMETERS: - LM - logit model - XY - test set - NPoints - test set size - -RESULT: - average error (error when estimating posterior probabilities). -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey + Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mnlavgerror( - logitmodel lm, +
    void alglib::nsfitspheremcc( real_2d_array xy, - ae_int_t npoints); + ae_int_t npoints, + ae_int_t nx, + real_1d_array& cx, + double& rhi, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Average relative error on the test set - -INPUT PARAMETERS: - LM - logit model - XY - test set - NPoints - test set size - -RESULT: - average relative error (error when estimating posterior probabilities). +This function is left for backward compatibility. +Use fitspheremi() instead. -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey + Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mnlavgrelerror( - logitmodel lm, +
    void alglib::nsfitspheremic( real_2d_array xy, - ae_int_t ssize); + ae_int_t npoints, + ae_int_t nx, + real_1d_array& cx, + double& rlo, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Classification error on test set = MNLRelClsError*NPoints +This function is left for backward compatibility. +Use fitspheremz() instead. -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey + Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::mnlclserror( - logitmodel lm, +
    void alglib::nsfitspheremzc( real_2d_array xy, - ae_int_t npoints); + ae_int_t npoints, + ae_int_t nx, + real_1d_array& cx, + double& rlo, + double& rhi, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -"Packs" coefficients and creates logit model in ALGLIB format (MNLUnpack -reversed). - -INPUT PARAMETERS: - A - model (see MNLUnpack) - NVars - number of independent variables - NClasses - number of classes - -OUTPUT PARAMETERS: - LM - logit model. +This function is left for backward compatibility. +Use fitspherex() instead. -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey + Copyright 14.04.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mnlpack( - real_2d_array a, - ae_int_t nvars, - ae_int_t nclasses, - logitmodel& lm); +
    void alglib::nsfitspherex( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nx, + ae_int_t problemtype, + double epsx, + ae_int_t aulits, + double penalty, + real_1d_array& cx, + double& rlo, + double& rhi, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Procesing +This function is an obsolete and deprecated version of fitting by +penalized cubic spline. -INPUT PARAMETERS: - LM - logit model, passed by non-constant reference - (some fields of structure are used as temporaries - when calculating model output). - X - input vector, array[0..NVars-1]. - Y - (possibly) preallocated buffer; if size of Y is less than - NClasses, it will be reallocated.If it is large enough, it - is NOT reallocated, so we can save some time on reallocation. +It was superseded by spline1dfit(), which is an orders of magnitude faster +and more memory-efficient implementation. -OUTPUT PARAMETERS: - Y - result, array[0..NClasses-1] - Vector of posterior probabilities for classification task. +Do NOT use this function in the new code! - -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mnlprocess(logitmodel lm, real_1d_array x, real_1d_array& y); +
    void alglib::spline1dfitpenalized( + real_1d_array x, + real_1d_array y, + ae_int_t m, + double rho, + ae_int_t& info, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::spline1dfitpenalized( + real_1d_array x, + real_1d_array y, + ae_int_t n, + ae_int_t m, + double rho, + ae_int_t& info, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -'interactive' variant of MNLProcess for languages like Python which -support constructs like "Y = MNLProcess(LM,X)" and interactive mode of the -interpreter +This function is an obsolete and deprecated version of fitting by +penalized cubic spline. -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +It was superseded by spline1dfit(), which is an orders of magnitude faster +and more memory-efficient implementation. - -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey +Do NOT use this function in the new code! + + -- ALGLIB PROJECT -- + Copyright 19.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mnlprocessi( - logitmodel lm, +
    void alglib::spline1dfitpenalizedw( + real_1d_array x, + real_1d_array y, + real_1d_array w, + ae_int_t m, + double rho, + ae_int_t& info, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::spline1dfitpenalizedw( real_1d_array x, - real_1d_array& y); + real_1d_array y, + real_1d_array w, + ae_int_t n, + ae_int_t m, + double rho, + ae_int_t& info, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    + + +
     
    /************************************************************************* -Relative classification error on the test set +Inverse matrix update by the Sherman-Morrison formula -INPUT PARAMETERS: - LM - logit model - XY - test set - NPoints - test set size +The algorithm updates matrix A^-1 when adding a vector to a column +of matrix A. -RESULT: - percent of incorrectly classified cases. +Input parameters: + InvA - inverse of matrix A. + Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + UpdColumn - the column of A whose vector U was added. + 0 <= UpdColumn <= N-1 + U - the vector to be added to a column. + Array whose index ranges within [0..N-1]. + +Output parameters: + InvA - inverse of modified matrix A. -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey + Copyright 2005 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mnlrelclserror( - logitmodel lm, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::rmatrixinvupdatecolumn( + real_2d_array& inva, + ae_int_t n, + ae_int_t updcolumn, + real_1d_array u, + const xparams _params = alglib::xdefault);
    - +
     
/************************************************************************* -RMS error on the test set +Inverse matrix update by the Sherman-Morrison formula -INPUT PARAMETERS: - LM - logit model - XY - test set - NPoints - test set size +The algorithm updates matrix A^-1 when adding a vector to a row +of matrix A. -RESULT: - root mean square error (error when estimating posterior probabilities). +Input parameters: + InvA - inverse of matrix A. + Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + UpdRow - the row of A whose vector V was added. + 0 <= UpdRow <= N-1 + V - the vector to be added to a row. + Array whose index ranges within [0..N-1]. + +Output parameters: + InvA - inverse of modified matrix A. -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey + Copyright 2005 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mnlrmserror( - logitmodel lm, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::rmatrixinvupdaterow( + real_2d_array& inva, + ae_int_t n, + ae_int_t updrow, + real_1d_array v, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine trains logit model. +Inverse matrix update by the Sherman-Morrison formula -INPUT PARAMETERS: - XY - training set, array[0..NPoints-1,0..NVars] - First NVars columns store values of independent - variables, next column stores number of class (from 0 - to NClasses-1) which dataset element belongs to. Fractional - values are rounded to nearest integer. - NPoints - training set size, NPoints>=1 - NVars - number of independent variables, NVars>=1 - NClasses - number of classes, NClasses>=2 +The algorithm updates matrix A^-1 when adding a number to an element +of matrix A. + +Input parameters: + InvA - inverse of matrix A. + Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + UpdRow - row where the element to be updated is stored. + UpdColumn - column where the element to be updated is stored. + UpdVal - a number to be added to the element. -OUTPUT PARAMETERS: - Info - return code: - * -2, if there is a point with class number - outside of [0..NClasses-1]. - * -1, if incorrect parameters was passed - (NPoints<NVars+2, NVars<1, NClasses<2). - * 1, if task has been solved - LM - model built - Rep - training report + +Output parameters: + InvA - inverse of modified matrix A. -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey + Copyright 2005 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mnltrainh( - real_2d_array xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, - ae_int_t& info, - logitmodel& lm, - mnlreport& rep); +
    void alglib::rmatrixinvupdatesimple( + real_2d_array& inva, + ae_int_t n, + ae_int_t updrow, + ae_int_t updcolumn, + double updval, + const xparams _params = alglib::xdefault);
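    A minimal illustrative sketch for the element update above (not from the upstream sources), assuming the ALGLIB linalg.h header: add 2.0 to A[1][0] of the 2x2 identity and let the routine refresh the stored inverse in place:

    #include <cstdio>
    #include "linalg.h"   // assumed ALGLIB header for the inverseupdate unit

    int main()
    {
        // A = 2x2 identity, therefore InvA = identity as well
        alglib::real_2d_array inva = "[[1,0],[0,1]]";
        // A[1][0] += 2.0  =>  A = [[1,0],[2,1]],  so A^-1 becomes [[1,0],[-2,1]]
        alglib::rmatrixinvupdatesimple(inva, 2, 1, 0, 2.0);
        printf("[[%.1f, %.1f], [%.1f, %.1f]]\n", inva[0][0], inva[0][1], inva[1][0], inva[1][1]);
        return 0;
    }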
    - +
     
    /************************************************************************* -Unpacks coefficients of logit model. Logit model have form: +Inverse matrix update by the Sherman-Morrison formula - P(class=i) = S(i) / (S(0) + S(1) + ... +S(M-1)) - S(i) = Exp(A[i,0]*X[0] + ... + A[i,N-1]*X[N-1] + A[i,N]), when i<M-1 - S(M-1) = 1 +The algorithm computes the inverse of matrix A+u*v' by using the given matrix +A^-1 and the vectors u and v. -INPUT PARAMETERS: - LM - logit model in ALGLIB format +Input parameters: + InvA - inverse of matrix A. + Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + U - the vector modifying the matrix. + Array whose index ranges within [0..N-1]. + V - the vector modifying the matrix. + Array whose index ranges within [0..N-1]. -OUTPUT PARAMETERS: - V - coefficients, array[0..NClasses-2,0..NVars] - NVars - number of independent variables - NClasses - number of classes +Output parameters: + InvA - inverse of matrix A + u*v'. -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey + Copyright 2005 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mnlunpack( - logitmodel lm, - real_2d_array& a, - ae_int_t& nvars, - ae_int_t& nclasses); +
    void alglib::rmatrixinvupdateuv( + real_2d_array& inva, + ae_int_t n, + real_1d_array u, + real_1d_array v, + const xparams _params = alglib::xdefault);
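    An illustrative sketch of the general rank-one update above (the column/row/simple variants follow the same calling pattern); not from the upstream sources, assuming the ALGLIB linalg.h header. Starting from A = I, whose inverse is also I, it applies the Sherman-Morrison update for A + u*v' and prints the refreshed inverse:

    #include <cstdio>
    #include "linalg.h"   // assumed ALGLIB header for the inverseupdate unit

    int main()
    {
        // A = 2x2 identity, so InvA = identity as well
        alglib::real_2d_array inva = "[[1,0],[0,1]]";
        alglib::real_1d_array u = "[1,0]";
        alglib::real_1d_array v = "[0,1]";
        // A + u*v' = [[1,1],[0,1]], hence InvA becomes [[1,-1],[0,1]]
        alglib::rmatrixinvupdateuv(inva, 2, u, v);
        printf("[[%.1f, %.1f], [%.1f, %.1f]]\n", inva[0][0], inva[0][1], inva[1][0], inva[1][1]);
        return 0;
    }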
    - +
    - -barycentricfitreport
    -lsfitreport
    -lsfitstate
    -polynomialfitreport
    -spline1dfitreport
    -barycentricfitfloaterhormann
    -barycentricfitfloaterhormannwc
    -logisticcalc4
    -logisticcalc5
    -logisticfit4
    -logisticfit45x
    -logisticfit4ec
    -logisticfit5
    -logisticfit5ec
    -lsfitcreatef
    -lsfitcreatefg
    -lsfitcreatefgh
    -lsfitcreatewf
    -lsfitcreatewfg
    -lsfitcreatewfgh
    -lsfitfit
    -lsfitlinear
    -lsfitlinearc
    -lsfitlinearw
    -lsfitlinearwc
    -lsfitresults
    -lsfitsetbc
    -lsfitsetcond
    -lsfitsetgradientcheck
    -lsfitsetscale
    -lsfitsetstpmax
    -lsfitsetxrep
    -lstfitpiecewiselinearrdp
    -lstfitpiecewiselinearrdpfixed
    -polynomialfit
    -polynomialfitwc
    -spline1dfitcubic
    -spline1dfitcubicwc
    -spline1dfithermite
    -spline1dfithermitewc
    -spline1dfitpenalized
    -spline1dfitpenalizedw
    +jacobianellipticfunctions
    - - - - - - - - - - - -
    lsfit_d_lin Unconstrained (general) linear least squares fitting with and without weights
    lsfit_d_linc Constrained (general) linear least squares fitting with and without weights
    lsfit_d_nlf Nonlinear fitting using function value only
lsfit_d_nlfb Bound constrained nonlinear fitting using function value only
    lsfit_d_nlfg Nonlinear fitting using gradient
    lsfit_d_nlfgh Nonlinear fitting using gradient and Hessian
    lsfit_d_nlscale Nonlinear fitting with custom scaling and bound constraints
    lsfit_d_pol Unconstrained polynomial fitting
    lsfit_d_polc Constrained polynomial fitting
    lsfit_d_spline Unconstrained fitting by penalized regression spline
    lsfit_t_4pl 4-parameter logistic fitting
    lsfit_t_5pl 5-parameter logistic fitting
    - +
     
    /************************************************************************* -Barycentric fitting report: - RMSError RMS error - AvgError average error - AvgRelError average relative error (for non-zero Y[I]) - MaxError maximum error - TaskRCond reciprocal of task's condition number +Jacobian Elliptic Functions + +Evaluates the Jacobian elliptic functions sn(u|m), cn(u|m), +and dn(u|m) of parameter m between 0 and 1, and real +argument u. + +These functions are periodic, with quarter-period on the +real axis equal to the complete elliptic integral +ellpk(1.0-m). + +Relation to incomplete elliptic integral: +If u = ellik(phi,m), then sn(u|m) = sin(phi), +and cn(u|m) = cos(phi). Phi is called the amplitude of u. + +Computation is by means of the arithmetic-geometric mean +algorithm, except when m is within 1e-9 of 0 or 1. In the +latter case with m close to 1, the approximation applies +only for phi < pi/2. + +ACCURACY: + +Tested at random points with u between 0 and 10, m between +0 and 1. + + Absolute error (* = relative error): +arithmetic function # trials peak rms + IEEE phi 10000 9.2e-16* 1.4e-16* + IEEE sn 50000 4.1e-15 4.6e-16 + IEEE cn 40000 3.6e-15 4.4e-16 + IEEE dn 10000 1.3e-12 1.8e-14 + + Peak error observed in consistency check using addition +theorem for sn(u+v) was 4e-16 (absolute). Also tested by +the above relation to the incomplete elliptic integral. +Accuracy deteriorates when u is large. + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 2000 by Stephen L. Moshier *************************************************************************/ -
    class barycentricfitreport -{ - double taskrcond; - ae_int_t dbest; - double rmserror; - double avgerror; - double avgrelerror; - double maxerror; -}; +
    void alglib::jacobianellipticfunctions( + double u, + double m, + double& sn, + double& cn, + double& dn, + double& ph, + const xparams _params = alglib::xdefault);
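    A short illustrative sketch for the routine above (not from the upstream sources), assuming the ALGLIB specialfunctions.h header; it evaluates sn, cn, dn at one point and checks the identity sn^2 + cn^2 = 1:

    #include <cstdio>
    #include "specialfunctions.h"   // assumed ALGLIB header for the elliptic-function unit

    int main()
    {
        double u = 0.7, m = 0.5;
        double sn, cn, dn, ph;
        alglib::jacobianellipticfunctions(u, m, sn, cn, dn, ph);
        // sn^2 + cn^2 must equal 1 up to rounding error
        printf("sn=%.12f cn=%.12f dn=%.12f phi=%.12f check=%.2e\n",
               sn, cn, dn, ph, sn*sn + cn*cn - 1.0);
        return 0;
    }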
    - + +
    + +jarqueberatest
    + + +
    +
     
    /************************************************************************* -Least squares fitting report. This structure contains informational fields -which are set by fitting functions provided by this unit. +Jarque-Bera test -Different functions initialize different sets of fields, so you should -read documentation on specific function you used in order to know which -fields are initialized. +This test checks hypotheses about the fact that a given sample X is a +sample of normal random variable. - TaskRCond reciprocal of task's condition number - IterationsCount number of internal iterations +Requirements: + * the number of elements in the sample is not less than 5. - VarIdx if user-supplied gradient contains errors which were - detected by nonlinear fitter, this field is set to - index of the first component of gradient which is - suspected to be spoiled by bugs. +Input parameters: + X - sample. Array whose index goes from 0 to N-1. + N - size of the sample. N>=5 - RMSError RMS error - AvgError average error - AvgRelError average relative error (for non-zero Y[I]) - MaxError maximum error +Output parameters: + P - p-value for the test - WRMSError weighted RMS error +Accuracy of the approximation used (5<=N<=1951): - CovPar covariance matrix for parameters, filled by some solvers - ErrPar vector of errors in parameters, filled by some solvers - ErrCurve vector of fit errors - variability of the best-fit - curve, filled by some solvers. - Noise vector of per-point noise estimates, filled by - some solvers. - R2 coefficient of determination (non-weighted, non-adjusted), - filled by some solvers. +p-value relative error (5<=N<=1951) +[1, 0.1] < 1% +[0.1, 0.01] < 2% +[0.01, 0.001] < 6% +[0.001, 0] wasn't measured + +For N>1951 accuracy wasn't measured but it shouldn't be sharply different +from table values. + + -- ALGLIB -- + Copyright 09.04.2007 by Bochkanov Sergey *************************************************************************/ -
    class lsfitreport +
    void alglib::jarqueberatest( + real_1d_array x, + ae_int_t n, + double& p, + const xparams _params = alglib::xdefault); + +
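    An illustrative sketch of a Jarque-Bera call (not from the upstream sources), assuming the ALGLIB statistics.h header; it feeds a small sample and prints the p-value. Large p-values are consistent with normality, small ones reject it:

    #include <cstdio>
    #include "statistics.h"   // assumed ALGLIB header for hypothesis tests

    int main()
    {
        // sample of N=7 points (the test requires N>=5)
        alglib::real_1d_array x = "[0.2, -0.5, 1.1, 0.4, -0.3, 0.0, 0.7]";
        double p;
        alglib::jarqueberatest(x, x.length(), p);
        printf("Jarque-Bera p-value = %.4f\n", p);
        return 0;
    }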
    + + + +
    +
    /************************************************************************* +Buffer object which is used to perform various requests (usually model +inference) in the multithreaded mode (multiple threads working with same +KNN object). + +This object should be created with KNNCreateBuffer(). +*************************************************************************/ +
    class knnbuffer { - double taskrcond; - ae_int_t iterationscount; - ae_int_t varidx; - double rmserror; - double avgerror; - double avgrelerror; - double maxerror; - double wrmserror; - real_2d_array covpar; - real_1d_array errpar; - real_1d_array errcurve; - real_1d_array noise; - double r2; };
    - +
     
    /************************************************************************* -Nonlinear fitter. - -You should use ALGLIB functions to work with fitter. -Never try to access its fields directly! +A KNN builder object; this object encapsulates dataset and all related +settings, it is used to create an actual instance of KNN model. *************************************************************************/ -
    class lsfitstate +
    class knnbuilder { };
    - +
     
    /************************************************************************* -Polynomial fitting report: - TaskRCond reciprocal of task's condition number - RMSError RMS error - AvgError average error - AvgRelError average relative error (for non-zero Y[I]) - MaxError maximum error +KNN model, can be used for classification or regression *************************************************************************/ -
    class polynomialfitreport +
    class knnmodel { - double taskrcond; - double rmserror; - double avgerror; - double avgrelerror; - double maxerror; };
    - +
     
    /************************************************************************* -Spline fitting report: - RMSError RMS error - AvgError average error - AvgRelError average relative error (for non-zero Y[I]) - MaxError maximum error +KNN training report. -Fields below are filled by obsolete functions (Spline1DFitCubic, -Spline1DFitHermite). Modern fitting functions do NOT fill these fields: - TaskRCond reciprocal of task's condition number +Following fields store training set errors: +* relclserror - fraction of misclassified cases, [0,1] +* avgce - average cross-entropy in bits per symbol +* rmserror - root-mean-square error +* avgerror - average error +* avgrelerror - average relative error + +For classification problems: +* RMS, AVG and AVGREL errors are calculated for posterior probabilities + +For regression problems: +* RELCLS and AVGCE errors are zero *************************************************************************/ -
    class spline1dfitreport +
    class knnreport { - double taskrcond; + double relclserror; + double avgce; double rmserror; double avgerror; double avgrelerror; - double maxerror; };
    - +
     
    /************************************************************************* -Rational least squares fitting using Floater-Hormann rational functions -with optimal D chosen from [0,9]. +Calculates all kinds of errors for the model in one call. -Equidistant grid with M node on [min(x),max(x)] is used to build basis -functions. Different values of D are tried, optimal D (least root mean -square error) is chosen. Task is linear, so linear least squares solver -is used. Complexity of this computational scheme is O(N*M^2) (mostly -dominated by the least squares solver). +INPUT PARAMETERS: + Model - KNN model + XY - test set: + * one row per point + * first NVars columns store independent variables + * depending on problem type: + * next column stores class number in [0,NClasses) - for + classification problems + * next NOut columns store dependent variables - for + regression problems + NPoints - test set size, NPoints>=0 -COMMERCIAL EDITION OF ALGLIB: +OUTPUT PARAMETERS: + Rep - following fields are loaded with errors for both regression + and classification models: + * rep.rmserror - RMS error for the output + * rep.avgerror - average error + * rep.avgrelerror - average relative error + following fields are set only for classification models, + zero for regression ones: + * relclserror - relative classification error, in [0,1] + * avgce - average cross-entropy in bits per dataset entry - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +NOTE: the cross-entropy metric is too unstable when used to evaluate KNN + models (such models can report exactly zero probabilities), so we + do not recommend using it. + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::knnallerrors( + knnmodel model, + real_2d_array xy, + ae_int_t npoints, + knnreport& rep, + const xparams _params = alglib::xdefault); + +
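    An illustrative error-report sketch (not from the upstream sources), assuming the ALGLIB dataanalysis.h header; it builds a tiny classification model as in the training sketch shown after knnbuilderbuildknnmodel() below, then loads all metrics in one pass. Only the fields relevant to the problem type are meaningful:

    #include <cstdio>
    #include "dataanalysis.h"   // assumed ALGLIB header for the knn unit

    int main()
    {
        // two 2D classes: label 0 near the origin, label 1 near (5,5)
        alglib::real_2d_array xy = "[[0,0,0],[1,0,0],[5,5,1],[6,5,1]]";
        alglib::knnbuilder builder;
        alglib::knnmodel model;
        alglib::knnreport rep;
        alglib::knnbuildercreate(builder);
        alglib::knnbuildersetdatasetcls(builder, xy, 4, 2, 2);
        alglib::knnbuilderbuildknnmodel(builder, 1, 0.0, model, rep);
        // evaluate all error metrics on a test set (here: the training set) in one pass
        alglib::knnallerrors(model, xy, 4, rep);
        printf("relcls=%.3f rms=%.3f avg=%.3f\n", rep.relclserror, rep.rmserror, rep.avgerror);
        return 0;
    }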
    + +
    +
    /************************************************************************* +Average cross-entropy (in bits per element) on the test set INPUT PARAMETERS: - X - points, array[0..N-1]. - Y - function values, array[0..N-1]. - N - number of points, N>0. - M - number of basis functions ( = number_of_nodes), M>=2. + Model - KNN model + XY - test set + NPoints - test set size -OUTPUT PARAMETERS: - Info- same format as in LSFitLinearWC() subroutine. - * Info>0 task is solved - * Info<=0 an error occured: - -4 means inconvergence of internal SVD - -3 means inconsistent constraints - B - barycentric interpolant. - Rep - report, same format as in LSFitLinearWC() subroutine. - Following fields are set: - * DBest best value of the D parameter - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED +RESULT: + CrossEntropy/NPoints. + Zero if model solves regression task. - -- ALGLIB PROJECT -- - Copyright 18.08.2009 by Bochkanov Sergey +NOTE: the cross-entropy metric is too unstable when used to evaluate KNN + models (such models can report exactly zero probabilities), so we + do not recommend using it. + +NOTE: if you need several different kinds of error metrics, it is better + to use knnallerrors() which computes all error metric with just one + pass over dataset. + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::barycentricfitfloaterhormann( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& b, - barycentricfitreport& rep); -void alglib::smp_barycentricfitfloaterhormann( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& b, - barycentricfitreport& rep); +
    double alglib::knnavgce( + knnmodel model, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Weghted rational least squares fitting using Floater-Hormann rational -functions with optimal D chosen from [0,9], with constraints and -individual weights. +Average error on the test set -Equidistant grid with M node on [min(x),max(x)] is used to build basis -functions. Different values of D are tried, optimal D (least WEIGHTED root -mean square error) is chosen. Task is linear, so linear least squares -solver is used. Complexity of this computational scheme is O(N*M^2) -(mostly dominated by the least squares solver). +Its meaning for regression task is obvious. As for classification problems, +average error means error when estimating posterior probabilities. -SEE ALSO -* BarycentricFitFloaterHormann(), "lightweight" fitting without invididual - weights and constraints. +INPUT PARAMETERS: + Model - KNN model + XY - test set + NPoints - test set size -COMMERCIAL EDITION OF ALGLIB: +RESULT: + average error - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. +NOTE: if you need several different kinds of error metrics, it is better + to use knnallerrors() which computes all error metric with just one + pass over dataset. + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::knnavgerror( + knnmodel model, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Average relative error on the test set + +Its meaning for regression task is obvious. As for classification problems, +average relative error means error when estimating posterior probabilities. + +INPUT PARAMETERS: + Model - KNN model + XY - test set + NPoints - test set size + +RESULT: + average relative error + +NOTE: if you need several different kinds of error metrics, it is better + to use knnallerrors() which computes all error metric with just one + pass over dataset. + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::knnavgrelerror( + knnmodel model, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This subroutine builds KNN model according to current settings, using +dataset internally stored in the builder object. + +The model being built performs inference using Eps-approximate K nearest +neighbors search algorithm, with: +* K=1, Eps=0 corresponding to the "nearest neighbor algorithm" +* K>1, Eps=0 corresponding to the "K nearest neighbors algorithm" +* K>=1, Eps>0 corresponding to "approximate nearest neighbors algorithm" + +An approximate KNN is a good option for high-dimensional datasets (exact +KNN works slowly when dimensions count grows). + +An ALGLIB implementation of kd-trees is used to perform k-nn searches. + + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - X - points, array[0..N-1]. - Y - function values, array[0..N-1]. - W - weights, array[0..N-1] - Each summand in square sum of approximation deviations from - given values is multiplied by the square of corresponding - weight. Fill it by 1's if you don't want to solve weighted - task. - N - number of points, N>0. - XC - points where function values/derivatives are constrained, - array[0..K-1]. - YC - values of constraints, array[0..K-1] - DC - array[0..K-1], types of constraints: - * DC[i]=0 means that S(XC[i])=YC[i] - * DC[i]=1 means that S'(XC[i])=YC[i] - SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS - K - number of constraints, 0<=K<M. - K=0 means no constraints (XC/YC/DC are not used in such cases) - M - number of basis functions ( = number_of_nodes), M>=2. + S - KNN builder object + K - number of neighbors to search for, K>=1 + Eps - approximation factor: + * Eps=0 means that exact kNN search is performed + * Eps>0 means that (1+Eps)-approximate search is performed OUTPUT PARAMETERS: - Info- same format as in LSFitLinearWC() subroutine. - * Info>0 task is solved - * Info<=0 an error occured: - -4 means inconvergence of internal SVD - -3 means inconsistent constraints - -1 means another errors in parameters passed - (N<=0, for example) - B - barycentric interpolant. - Rep - report, same format as in LSFitLinearWC() subroutine. - Following fields are set: - * DBest best value of the D parameter - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED + Model - KNN model + Rep - report -IMPORTANT: - this subroutine doesn't calculate task's condition number for K<>0. - -SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: - -Setting constraints can lead to undesired results, like ill-conditioned -behavior, or inconsistency being detected. From the other side, it allows -us to improve quality of the fit. Here we summarize our experience with -constrained barycentric interpolants: -* excessive constraints can be inconsistent. Floater-Hormann basis - functions aren't as flexible as splines (although they are very smooth). 
-* the more evenly constraints are spread across [min(x),max(x)], the more - chances that they will be consistent -* the greater is M (given fixed constraints), the more chances that - constraints will be consistent -* in the general case, consistency of constraints IS NOT GUARANTEED. -* in the several special cases, however, we CAN guarantee consistency. -* one of this cases is constraints on the function VALUES at the interval - boundaries. Note that consustency of the constraints on the function - DERIVATIVES is NOT guaranteed (you can use in such cases cubic splines - which are more flexible). -* another special case is ONE constraint on the function value (OR, but - not AND, derivative) anywhere in the interval - -Our final recommendation is to use constraints WHEN AND ONLY WHEN you -can't solve your task without them. Anything beyond special cases given -above is not guaranteed and may result in inconsistency. - - -- ALGLIB PROJECT -- - Copyright 18.08.2009 by Bochkanov Sergey + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::barycentricfitfloaterhormannwc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t n, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t k, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& b, - barycentricfitreport& rep); -void alglib::smp_barycentricfitfloaterhormannwc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t n, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, +
    void alglib::knnbuilderbuildknnmodel( + knnbuilder s, ae_int_t k, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& b, - barycentricfitreport& rep); + double eps, + knnmodel& model, + knnreport& rep, + const xparams _params = alglib::xdefault);
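    A compact end-to-end sketch of the workflow described above (not from the upstream sources), assuming the ALGLIB dataanalysis.h header: create a builder, attach a dense classification dataset, build a K=3 exact-KNN model and run inference on a new point. Parameter order follows the declarations shown in this manual:

    #include <cstdio>
    #include "dataanalysis.h"   // assumed ALGLIB header for the knn unit

    int main()
    {
        // 6 points, 2 features, class label (0 or 1) in the last column
        alglib::real_2d_array xy = "[[0,0,0],[1,0,0],[0,1,0],[5,5,1],[6,5,1],[5,6,1]]";
        alglib::knnbuilder builder;
        alglib::knnbuildercreate(builder);
        alglib::knnbuildersetdatasetcls(builder, xy, 6, 2, 2);

        // K=3 neighbors, Eps=0 => exact KNN search
        alglib::knnmodel model;
        alglib::knnreport rep;
        alglib::knnbuilderbuildknnmodel(builder, 3, 0.0, model, rep);

        // posterior probabilities for a query point near the second cluster
        alglib::real_1d_array x = "[5.2, 5.1]";
        alglib::real_1d_array y;
        alglib::knnprocess(model, x, y);
        printf("P(class0)=%.2f P(class1)=%.2f, predicted class=%d\n",
               y[0], y[1], (int)alglib::knnclassify(model, x));
        return 0;
    }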
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -This function calculates value of four-parameter logistic (4PL) model at -specified point X. 4PL model has following form: +This subroutine creates KNNBuilder object which is used to train KNN models. - F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) +By default, new builder stores empty dataset and some reasonable default +settings. At the very least, you should specify dataset prior to building +KNN model. You can also tweak settings of the model construction algorithm +(recommended, although default settings should work well). + +Following actions are mandatory: +* calling knnbuildersetdataset() to specify dataset +* calling knnbuilderbuildknnmodel() to build KNN model using current + dataset and default settings + +Additionally, you may call: +* knnbuildersetnorm() to change norm being used INPUT PARAMETERS: - X - current point, X>=0: - * zero X is correctly handled even for B<=0 - * negative X results in exception. - A, B, C, D- parameters of 4PL model: - * A is unconstrained - * B is unconstrained; zero or negative values are handled - correctly. - * C>0, non-positive value results in exception - * D is unconstrained + none -RESULT: - model value at X +OUTPUT PARAMETERS: + S - KNN builder -NOTE: if B=0, denominator is assumed to be equal to 2.0 even for zero X - (strictly speaking, 0^0 is undefined). + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::knnbuildercreate( + knnbuilder& s, + const xparams _params = alglib::xdefault); -NOTE: this function also throws exception if all input parameters are - correct, but overflow was detected during calculations. +
    +

    Examples:   [1]  [2]  

    + +
    +
    /************************************************************************* +Specifies classification problem (two or more classes are predicted). +There also exists "regression" version of this function. -NOTE: this function performs a lot of checks; if you need really high - performance, consider evaluating model yourself, without checking - for degenerate cases. +This subroutine adds dense dataset to the internal storage of the builder +object. Specifying your dataset in the dense format means that the dense +version of the KNN construction algorithm will be invoked. +INPUT PARAMETERS: + S - KNN builder object + XY - array[NPoints,NVars+1] (note: actual size can be + larger, only leading part is used anyway), dataset: + * first NVars elements of each row store values of the + independent variables + * next element stores class index, in [0,NClasses) + NPoints - number of rows in the dataset, NPoints>=1 + NVars - number of independent variables, NVars>=1 + NClasses - number of classes, NClasses>=2 - -- ALGLIB PROJECT -- - Copyright 14.05.2014 by Bochkanov Sergey +OUTPUT PARAMETERS: + S - KNN builder + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -
    double alglib::logisticcalc4( - double x, - double a, - double b, - double c, - double d); +
    void alglib::knnbuildersetdatasetcls( + knnbuilder s, + real_2d_array xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t nclasses, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function calculates value of five-parameter logistic (5PL) model at -specified point X. 5PL model has following form: +Specifies regression problem (one or more continuous output variables are +predicted). There also exists "classification" version of this function. - F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) +This subroutine adds dense dataset to the internal storage of the builder +object. Specifying your dataset in the dense format means that the dense +version of the KNN construction algorithm will be invoked. INPUT PARAMETERS: - X - current point, X>=0: - * zero X is correctly handled even for B<=0 - * negative X results in exception. - A, B, C, D, G- parameters of 5PL model: - * A is unconstrained - * B is unconstrained; zero or negative values are handled - correctly. - * C>0, non-positive value results in exception - * D is unconstrained - * G>0, non-positive value results in exception + S - KNN builder object + XY - array[NPoints,NVars+NOut] (note: actual size can be + larger, only leading part is used anyway), dataset: + * first NVars elements of each row store values of the + independent variables + * next NOut elements store values of the dependent + variables + NPoints - number of rows in the dataset, NPoints>=1 + NVars - number of independent variables, NVars>=1 + NOut - number of dependent variables, NOut>=1 -RESULT: - model value at X +OUTPUT PARAMETERS: + S - KNN builder -NOTE: if B=0, denominator is assumed to be equal to Power(2.0,G) even for - zero X (strictly speaking, 0^0 is undefined). + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::knnbuildersetdatasetreg( + knnbuilder s, + real_2d_array xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t nout, + const xparams _params = alglib::xdefault); -NOTE: this function also throws exception if all input parameters are - correct, but overflow was detected during calculations. +
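    A regression-flavoured sketch of the call above (not from the upstream sources), assuming the ALGLIB dataanalysis.h header: one input variable, one output variable in the last column, followed by the usual build and inference steps:

    #include <cstdio>
    #include "dataanalysis.h"   // assumed ALGLIB header for the knn unit

    int main()
    {
        // 5 samples of y ~ 2*x: first column is the input, last column the target
        alglib::real_2d_array xy = "[[0,0],[1,2],[2,4],[3,6],[4,8]]";
        alglib::knnbuilder builder;
        alglib::knnbuildercreate(builder);
        alglib::knnbuildersetdatasetreg(builder, xy, 5, 1, 1);   // NPoints=5, NVars=1, NOut=1

        alglib::knnmodel model;
        alglib::knnreport rep;
        alglib::knnbuilderbuildknnmodel(builder, 2, 0.0, model, rep);   // K=2, exact search

        alglib::real_1d_array x = "[2.6]";
        alglib::real_1d_array y;
        alglib::knnprocess(model, x, y);    // y[0] holds the regression estimate from the 2 nearest neighbors
        printf("prediction at x=2.6: %.2f\n", y[0]);
        return 0;
    }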
    +

    Examples:   [1]  

    + +
    +
/************************************************************************* +This function sets norm type used for neighbor search. -NOTE: this function performs a lot of checks; if you need really high - performance, consider evaluating model yourself, without checking - for degenerate cases. +INPUT PARAMETERS: + S - KNN builder object + NormType - norm type: + * 0 inf-norm + * 1 1-norm + * 2 Euclidean norm (default) +OUTPUT PARAMETERS: + S - KNN builder - -- ALGLIB PROJECT -- - Copyright 14.05.2014 by Bochkanov Sergey + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -
    double alglib::logisticcalc5( - double x, - double a, - double b, - double c, - double d, - double g); +
    void alglib::knnbuildersetnorm( + knnbuilder s, + ae_int_t nrmtype, + const xparams _params = alglib::xdefault);
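    A one-line tweak in the same builder workflow (an illustrative sketch, not from the upstream sources, assuming the ALGLIB dataanalysis.h header): switch the builder to the 1-norm before building the model:

    #include "dataanalysis.h"   // assumed ALGLIB header for the knn unit

    int main()
    {
        alglib::knnbuilder builder;
        alglib::knnbuildercreate(builder);
        // 1 = 1-norm (Manhattan distance); 0 = inf-norm, 2 = Euclidean norm (default)
        alglib::knnbuildersetnorm(builder, 1);
        // ... attach a dataset and call knnbuilderbuildknnmodel() as in the sketches above
        return 0;
    }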
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function fits four-parameter logistic (4PL) model to data provided -by user. 4PL model has following form: +This function returns most probable class number for an input X. It is +same as calling knnprocess(model,x,y), then determining i=argmax(y[i]) and +returning i. - F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) +A class number in [0,NOut) range in returned for classification problems, +-1 is returned when this function is called for regression problems. -Here: - * A, D - unconstrained (see LogisticFit4EC() for constrained 4PL) - * B>=0 - * C>0 +IMPORTANT: this function is thread-unsafe and modifies internal structures + of the model! You can not use same model object for parallel + evaluation from several threads. -IMPORTANT: output of this function is constrained in such way that B>0. - Because 4PL model is symmetric with respect to B, there is no - need to explore B<0. Constraining B makes algorithm easier - to stabilize and debug. - Users who for some reason prefer to work with negative B's - should transform output themselves (swap A and D, replace B by - -B). + Use knntsprocess() with independent thread-local buffers, if + you need thread-safe evaluation. -4PL fitting is implemented as follows: -* we perform small number of restarts from random locations which helps to - solve problem of bad local extrema. Locations are only partially random - - we use input data to determine good initial guess, but we include - controlled amount of randomness. -* we perform Levenberg-Marquardt fitting with very tight constraints on - parameters B and C - it allows us to find good initial guess for the - second stage without risk of running into "flat spot". -* second Levenberg-Marquardt round is performed without excessive - constraints. Results from the previous round are used as initial guess. -* after fitting is done, we compare results with best values found so far, - rewrite "best solution" if needed, and move to next random location. +INPUT PARAMETERS: + Model - KNN model + X - input vector, array[0..NVars-1]. -Overall algorithm is very stable and is not prone to bad local extrema. -Furthermore, it automatically scales when input data have very large or -very small range. +RESULT: + class number, -1 for regression tasks -INPUT PARAMETERS: - X - array[N], stores X-values. - MUST include only non-negative numbers (but may include - zero values). Can be unsorted. - Y - array[N], values to fit. - N - number of points. If N is less than length of X/Y, only - leading N elements are used. + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::knnclassify( + knnmodel model, + real_1d_array x, + const xparams _params = alglib::xdefault); -OUTPUT PARAMETERS: - A, B, C, D- parameters of 4PL model - Rep - fitting report. This structure has many fields, but ONLY - ONES LISTED BELOW ARE SET: - * Rep.IterationsCount - number of iterations performed - * Rep.RMSError - root-mean-square error - * Rep.AvgError - average absolute error - * Rep.AvgRelError - average relative error (calculated for - non-zero Y-values) - * Rep.MaxError - maximum absolute error - * Rep.R2 - coefficient of determination, R-squared. This - coefficient is calculated as R2=1-RSS/TSS (in case - of nonlinear regression there are multiple ways to - define R2, each of them giving different results). +
    +

    Examples:   [1]  [2]  

    + +
    +
    /************************************************************************* +This function creates buffer structure which can be used to perform +parallel KNN requests. -NOTE: after you obtained coefficients, you can evaluate model with - LogisticCalc4() function. +KNN subpackage provides two sets of computing functions - ones which use +internal buffer of KNN model (these functions are single-threaded because +they use same buffer, which can not shared between threads), and ones +which use external buffer. -NOTE: if you need better control over fitting process than provided by this - function, you may use LogisticFit45X(). +This function is used to initialize external buffer. -NOTE: step is automatically scaled according to scale of parameters being - fitted before we compare its length with EpsX. Thus, this function - can be used to fit data with very small or very large values without - changing EpsX. +INPUT PARAMETERS + Model - KNN model which is associated with newly created buffer +OUTPUT PARAMETERS + Buf - external buffer. - -- ALGLIB PROJECT -- - Copyright 14.02.2014 by Bochkanov Sergey + +IMPORTANT: buffer object should be used only with model which was used to + initialize buffer. Any attempt to use buffer with different + object is dangerous - you may get integrity check failure + (exception) because sizes of internal arrays do not fit to + dimensions of the model structure. + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::logisticfit4( - real_1d_array x, - real_1d_array y, - ae_int_t n, - double& a, - double& b, - double& c, - double& d, - lsfitreport& rep); +
    void alglib::knncreatebuffer( + knnmodel model, + knnbuffer& buf, + const xparams _params = alglib::xdefault);
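    An illustrative sketch of the buffered, thread-safe request path (not from the upstream sources), assuming the ALGLIB dataanalysis.h header; knntsprocess() is the thread-safe counterpart referenced in the notes above, and the exact parameter order used here is an assumption. Each worker thread would own its own knnbuffer:

    #include <cstdio>
    #include "dataanalysis.h"   // assumed ALGLIB header for the knn unit

    int main()
    {
        // build a tiny classification model as in the earlier sketches
        alglib::real_2d_array xy = "[[0,0,0],[1,0,0],[5,5,1],[6,5,1]]";
        alglib::knnbuilder builder;
        alglib::knnmodel model;
        alglib::knnreport rep;
        alglib::knnbuildercreate(builder);
        alglib::knnbuildersetdatasetcls(builder, xy, 4, 2, 2);
        alglib::knnbuilderbuildknnmodel(builder, 1, 0.0, model, rep);

        // one buffer per worker thread; reused for many requests
        alglib::knnbuffer buf;
        alglib::knncreatebuffer(model, buf);

        alglib::real_1d_array x = "[5.5, 5.0]";
        alglib::real_1d_array y;
        alglib::knntsprocess(model, buf, x, y);   // assumed signature: (model, buffer, in, out)
        printf("P(class1)=%.2f\n", y[1]);
        return 0;
    }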
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This is "expert" 4PL/5PL fitting function, which can be used if you need -better control over fitting process than provided by LogisticFit4() or -LogisticFit5(). +Inference using KNN model. -This function fits model of the form - - F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) (4PL model) - -or +See also knnprocess0(), knnprocessi() and knnclassify() for options with a +bit more convenient interface. - F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) (5PL model) +IMPORTANT: this function is thread-unsafe and modifies internal structures + of the model! You can not use same model object for parallel + evaluation from several threads. -Here: - * A, D - unconstrained - * B>=0 for 4PL, unconstrained for 5PL - * C>0 - * G>0 (if present) + Use knntsprocess() with independent thread-local buffers, if + you need thread-safe evaluation. INPUT PARAMETERS: - X - array[N], stores X-values. - MUST include only non-negative numbers (but may include - zero values). Can be unsorted. - Y - array[N], values to fit. - N - number of points. If N is less than length of X/Y, only - leading N elements are used. - CnstrLeft- optional equality constraint for model value at the left - boundary (at X=0). Specify NAN (Not-a-Number) if you do - not need constraint on the model value at X=0 (in C++ you - can pass alglib::fp_nan as parameter, in C# it will be - Double.NaN). - See below, section "EQUALITY CONSTRAINTS" for more - information about constraints. - CnstrRight- optional equality constraint for model value at X=infinity. - Specify NAN (Not-a-Number) if you do not need constraint - on the model value (in C++ you can pass alglib::fp_nan as - parameter, in C# it will be Double.NaN). - See below, section "EQUALITY CONSTRAINTS" for more - information about constraints. - Is4PL - whether 4PL or 5PL models are fitted - LambdaV - regularization coefficient, LambdaV>=0. - Set it to zero unless you know what you are doing. - EpsX - stopping condition (step size), EpsX>=0. - Zero value means that small step is automatically chosen. - See notes below for more information. - RsCnt - number of repeated restarts from random points. 4PL/5PL - models are prone to problem of bad local extrema. Utilizing - multiple random restarts allows us to improve algorithm - convergence. - RsCnt>=0. - Zero value means that function automatically choose small - amount of restarts (recommended). + Model - KNN model + X - input vector, array[0..NVars-1]. + Y - possible preallocated buffer. Reused if long enough. OUTPUT PARAMETERS: - A, B, C, D- parameters of 4PL model - G - parameter of 5PL model; for Is4PL=True, G=1 is returned. - Rep - fitting report. This structure has many fields, but ONLY - ONES LISTED BELOW ARE SET: - * Rep.IterationsCount - number of iterations performed - * Rep.RMSError - root-mean-square error - * Rep.AvgError - average absolute error - * Rep.AvgRelError - average relative error (calculated for - non-zero Y-values) - * Rep.MaxError - maximum absolute error - * Rep.R2 - coefficient of determination, R-squared. This - coefficient is calculated as R2=1-RSS/TSS (in case - of nonlinear regression there are multiple ways to - define R2, each of them giving different results). + Y - result. Regression estimate when solving regression task, + vector of posterior probabilities for classification task. -NOTE: after you obtained coefficients, you can evaluate model with - LogisticCalc5() function. 
+ -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::knnprocess( + knnmodel model, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault); -NOTE: step is automatically scaled according to scale of parameters being - fitted before we compare its length with EpsX. Thus, this function - can be used to fit data with very small or very large values without - changing EpsX. +
    +

    Examples:   [1]  [2]  

    + +
    +
    /************************************************************************* +This function returns first component of the inferred vector (i.e. one +with index #0). -EQUALITY CONSTRAINTS ON PARAMETERS +It is a convenience wrapper for knnprocess() intended for either: +* 1-dimensional regression problems +* 2-class classification problems -4PL/5PL solver supports equality constraints on model values at the left -boundary (X=0) and right boundary (X=infinity). These constraints are -completely optional and you can specify both of them, only one - or no -constraints at all. +In the former case this function returns inference result as scalar, which +is definitely more convenient that wrapping it as vector. In the latter +case it returns probability of object belonging to class #0. -Parameter CnstrLeft contains left constraint (or NAN for unconstrained -fitting), and CnstrRight contains right one. For 4PL, left constraint -ALWAYS corresponds to parameter A, and right one is ALWAYS constraint on -D. That's because 4PL model is normalized in such way that B>=0. +If you call it for anything different from two cases above, it will work +as defined, i.e. return y[0], although it is of less use in such cases. -For 5PL model things are different. Unlike 4PL one, 5PL model is NOT -symmetric with respect to change in sign of B. Thus, negative B's are -possible, and left constraint may constrain parameter A (for positive B's) -- or parameter D (for negative B's). Similarly changes meaning of right -constraint. +IMPORTANT: this function is thread-unsafe and modifies internal structures + of the model! You can not use same model object for parallel + evaluation from several threads. -You do not have to decide what parameter to constrain - algorithm will -automatically determine correct parameters as fitting progresses. However, -question highlighted above is important when you interpret fitting results. + Use knntsprocess() with independent thread-local buffers, if + you need thread-safe evaluation. + +INPUT PARAMETERS: + Model - KNN model + X - input vector, array[0..NVars-1]. +RESULT: + Y[0] - -- ALGLIB PROJECT -- - Copyright 14.02.2014 by Bochkanov Sergey + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::logisticfit45x( +
    double alglib::knnprocess0( + knnmodel model, real_1d_array x, - real_1d_array y, - ae_int_t n, - double cnstrleft, - double cnstrright, - bool is4pl, - double lambdav, - double epsx, - ae_int_t rscnt, - double& a, - double& b, - double& c, - double& d, - double& g, - lsfitreport& rep); + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -This function fits four-parameter logistic (4PL) model to data provided -by user, with optional constraints on parameters A and D. 4PL model has -following form: +'interactive' variant of knnprocess() for languages like Python which +support constructs like "y = knnprocessi(model,x)" and interactive mode of +the interpreter. - F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. -Here: - * A, D - with optional equality constraints - * B>=0 - * C>0 +IMPORTANT: this function is thread-unsafe and may modify internal + structures of the model! You can not use same model object for + parallel evaluation from several threads. -IMPORTANT: output of this function is constrained in such way that B>0. - Because 4PL model is symmetric with respect to B, there is no - need to explore B<0. Constraining B makes algorithm easier - to stabilize and debug. - Users who for some reason prefer to work with negative B's - should transform output themselves (swap A and D, replace B by - -B). + Use knntsprocess() with independent thread-local buffers if + you need thread-safe evaluation. -4PL fitting is implemented as follows: -* we perform small number of restarts from random locations which helps to - solve problem of bad local extrema. Locations are only partially random - - we use input data to determine good initial guess, but we include - controlled amount of randomness. -* we perform Levenberg-Marquardt fitting with very tight constraints on - parameters B and C - it allows us to find good initial guess for the - second stage without risk of running into "flat spot". -* second Levenberg-Marquardt round is performed without excessive - constraints. Results from the previous round are used as initial guess. -* after fitting is done, we compare results with best values found so far, - rewrite "best solution" if needed, and move to next random location. + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::knnprocessi( + knnmodel model, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault); -Overall algorithm is very stable and is not prone to bad local extrema. -Furthermore, it automatically scales when input data have very large or -very small range. +
    + +
    +
    /************************************************************************* +Relative classification error on the test set INPUT PARAMETERS: - X - array[N], stores X-values. - MUST include only non-negative numbers (but may include - zero values). Can be unsorted. - Y - array[N], values to fit. - N - number of points. If N is less than length of X/Y, only - leading N elements are used. - CnstrLeft- optional equality constraint for model value at the left - boundary (at X=0). Specify NAN (Not-a-Number) if you do - not need constraint on the model value at X=0 (in C++ you - can pass alglib::fp_nan as parameter, in C# it will be - Double.NaN). - See below, section "EQUALITY CONSTRAINTS" for more - information about constraints. - CnstrRight- optional equality constraint for model value at X=infinity. - Specify NAN (Not-a-Number) if you do not need constraint - on the model value (in C++ you can pass alglib::fp_nan as - parameter, in C# it will be Double.NaN). - See below, section "EQUALITY CONSTRAINTS" for more - information about constraints. + Model - KNN model + XY - test set + NPoints - test set size -OUTPUT PARAMETERS: - A, B, C, D- parameters of 4PL model - Rep - fitting report. This structure has many fields, but ONLY - ONES LISTED BELOW ARE SET: - * Rep.IterationsCount - number of iterations performed - * Rep.RMSError - root-mean-square error - * Rep.AvgError - average absolute error - * Rep.AvgRelError - average relative error (calculated for - non-zero Y-values) - * Rep.MaxError - maximum absolute error - * Rep.R2 - coefficient of determination, R-squared. This - coefficient is calculated as R2=1-RSS/TSS (in case - of nonlinear regression there are multiple ways to - define R2, each of them giving different results). +RESULT: + percent of incorrectly classified cases. + Zero if model solves regression task. -NOTE: after you obtained coefficients, you can evaluate model with - LogisticCalc4() function. +NOTE: if you need several different kinds of error metrics, it is better + to use knnallerrors() which computes all error metric with just one + pass over dataset. -NOTE: if you need better control over fitting process than provided by this - function, you may use LogisticFit45X(). + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::knnrelclserror( + knnmodel model, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); -NOTE: step is automatically scaled according to scale of parameters being - fitted before we compare its length with EpsX. Thus, this function - can be used to fit data with very small or very large values without - changing EpsX. +
    + +
    +
    /************************************************************************* +Changing search settings of KNN model. -EQUALITY CONSTRAINTS ON PARAMETERS +K and EPS parameters of KNN (AKNN) search are specified during model +construction. However, plain KNN algorithm with Euclidean distance allows +you to change them at any moment. -4PL/5PL solver supports equality constraints on model values at the left -boundary (X=0) and right boundary (X=infinity). These constraints are -completely optional and you can specify both of them, only one - or no -constraints at all. +NOTE: future versions of KNN model may support advanced versions of KNN, + such as NCA or LMNN. It is possible that such algorithms won't allow + you to change search settings on the fly. If you call this function + for an algorithm which does not support on-the-fly changes, it will + throw an exception. -Parameter CnstrLeft contains left constraint (or NAN for unconstrained -fitting), and CnstrRight contains right one. For 4PL, left constraint -ALWAYS corresponds to parameter A, and right one is ALWAYS constraint on -D. That's because 4PL model is normalized in such way that B>=0. +INPUT PARAMETERS: + Model - KNN model + K - K>=1, neighbors count + EPS - accuracy of the EPS-approximate NN search. Set to 0.0, if + you want to perform "classic" KNN search. Specify larger + values if you need to speed-up high-dimensional KNN + queries. +OUTPUT PARAMETERS: + nothing on success, exception on failure - -- ALGLIB PROJECT -- - Copyright 14.02.2014 by Bochkanov Sergey + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::logisticfit4ec( - real_1d_array x, - real_1d_array y, - ae_int_t n, - double cnstrleft, - double cnstrright, - double& a, - double& b, - double& c, - double& d, - lsfitreport& rep); +
    void alglib::knnrewritekeps( + knnmodel model, + ae_int_t k, + double eps, + const xparams _params = alglib::xdefault);
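A minimal sketch (not part of the upstream manual) of changing search settings on an already built plain-KNN model; it reuses the builder calls and toy dataset from the classification example later in this section, and prints the resulting posteriors without asserting exact values.

    #include "stdafx.h"
    #include <stdio.h>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // build a plain KNN classifier with K=1 on the toy dataset used below
        knnbuilder builder;
        knnmodel model;
        knnreport rep;
        real_2d_array xy = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
        knnbuildercreate(builder);
        knnbuildersetdatasetcls(builder, xy, 4, 2, 2);
        knnbuilderbuildknnmodel(builder, 1, 0.0, model, rep);

        // plain Euclidean KNN allows K/EPS to be changed on the fly:
        // switch to K=3 with exact (EPS=0) search
        knnrewritekeps(model, 3, 0.0);

        // posterior probabilities now come from a 3-neighbor vote
        real_1d_array x = "[+1,0]";
        real_1d_array y = "[]";
        knnprocess(model, x, y);
        printf("%s\n", y.tostring(3).c_str());
        return 0;
    }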
    - +
     
    /************************************************************************* -This function fits five-parameter logistic (5PL) model to data provided -by user. 5PL model has following form: +RMS error on the test set. - F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) +Its meaning for regression task is obvious. As for classification problems, +RMS error means error when estimating posterior probabilities. -Here: - * A, D - unconstrained - * B - unconstrained - * C>0 - * G>0 +INPUT PARAMETERS: + Model - KNN model + XY - test set + NPoints - test set size -IMPORTANT: unlike in 4PL fitting, output of this function is NOT - constrained in such way that B is guaranteed to be positive. - Furthermore, unlike 4PL, 5PL model is NOT symmetric with - respect to B, so you can NOT transform model to equivalent one, - with B having desired sign (>0 or <0). +RESULT: + root mean square error. -5PL fitting is implemented as follows: -* we perform small number of restarts from random locations which helps to - solve problem of bad local extrema. Locations are only partially random - - we use input data to determine good initial guess, but we include - controlled amount of randomness. -* we perform Levenberg-Marquardt fitting with very tight constraints on - parameters B and C - it allows us to find good initial guess for the - second stage without risk of running into "flat spot". Parameter G is - fixed at G=1. -* second Levenberg-Marquardt round is performed without excessive - constraints on B and C, but with G still equal to 1. Results from the - previous round are used as initial guess. -* third Levenberg-Marquardt round relaxes constraints on G and tries two - different models - one with B>0 and one with B<0. -* after fitting is done, we compare results with best values found so far, - rewrite "best solution" if needed, and move to next random location. +NOTE: if you need several different kinds of error metrics, it is better + to use knnallerrors() which computes all error metric with just one + pass over dataset. -Overall algorithm is very stable and is not prone to bad local extrema. -Furthermore, it automatically scales when input data have very large or -very small range. + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::knnrmserror( + knnmodel model, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS: - X - array[N], stores X-values. - MUST include only non-negative numbers (but may include - zero values). Can be unsorted. - Y - array[N], values to fit. - N - number of points. If N is less than length of X/Y, only - leading N elements are used. - -OUTPUT PARAMETERS: - A,B,C,D,G- parameters of 5PL model - Rep - fitting report. This structure has many fields, but ONLY - ONES LISTED BELOW ARE SET: - * Rep.IterationsCount - number of iterations performed - * Rep.RMSError - root-mean-square error - * Rep.AvgError - average absolute error - * Rep.AvgRelError - average relative error (calculated for - non-zero Y-values) - * Rep.MaxError - maximum absolute error - * Rep.R2 - coefficient of determination, R-squared. This - coefficient is calculated as R2=1-RSS/TSS (in case - of nonlinear regression there are multiple ways to - define R2, each of them giving different results). +
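A minimal sketch (not from the upstream manual) of evaluating the error metrics above on a held-out test set; it reuses the toy classifier from the classification example later in this section, and the last test row is deliberately mislabeled so that both errors are non-zero.

    #include "stdafx.h"
    #include <stdio.h>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // train the toy x>=0 vs x<0 classifier (see the classification example below)
        knnbuilder builder;
        knnmodel model;
        knnreport rep;
        real_2d_array xy = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
        knnbuildercreate(builder);
        knnbuildersetdatasetcls(builder, xy, 4, 2, 2);
        knnbuilderbuildknnmodel(builder, 1, 0.0, model, rep);

        // independent test set in the same row format; the last point is
        // intentionally labeled with the wrong class
        real_2d_array testset = "[[2,0,0],[0.5,-3,0],[-2,1,1],[-0.5,2,0]]";

        // relative classification error and RMS error of posterior probabilities;
        // knnallerrors() would compute all metrics in a single pass
        printf("%.4f\n", double(knnrelclserror(model, testset, 4)));
        printf("%.4f\n", double(knnrmserror(model, testset, 4)));
        return 0;
    }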
    + +
    +
    /************************************************************************* +This function serializes data structure to string. -NOTE: after you obtained coefficients, you can evaluate model with - LogisticCalc5() function. +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. + But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. +*************************************************************************/ +
    void knnserialize(knnmodel &obj, std::string &s_out); +void knnserialize(knnmodel &obj, std::ostream &s_out); +
    + +
    +
/************************************************************************* +Thread-safe processing using external buffer for temporaries. -NOTE: if you need better control over fitting process than provided by this - function, you may use LogisticFit45X(). +This function is thread-safe (i.e. you can use the same KNN model from +multiple threads) as long as you use different buffer objects for different +threads. -NOTE: step is automatically scaled according to scale of parameters being - fitted before we compare its length with EpsX. Thus, this function - can be used to fit data with very small or very large values without - changing EpsX. +INPUT PARAMETERS: + Model - KNN model + Buf - buffer object, must be allocated specifically for this + model with knncreatebuffer(). + X - input vector, array[NVars] +OUTPUT PARAMETERS: + Y - result, array[NOut]. Regression estimate when solving + regression task, vector of posterior probabilities for + a classification task. - -- ALGLIB PROJECT -- - Copyright 14.02.2014 by Bochkanov Sergey + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::logisticfit5( +
    void alglib::knntsprocess( + knnmodel model, + knnbuffer buf, real_1d_array x, - real_1d_array y, - ae_int_t n, - double& a, - double& b, - double& c, - double& d, - double& g, - lsfitreport& rep); + real_1d_array& y, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
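A minimal single-threaded sketch of the buffered call path described above; in a real program each worker thread would own its own knnbuffer. The knncreatebuffer(model, buf) call is an assumption based on the reference in the description, since its signature is documented elsewhere in the manual.

    #include "stdafx.h"
    #include <stdio.h>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // train the toy classifier used throughout this section
        knnbuilder builder;
        knnmodel model;
        knnreport rep;
        real_2d_array xy = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
        knnbuildercreate(builder);
        knnbuildersetdatasetcls(builder, xy, 4, 2, 2);
        knnbuilderbuildknnmodel(builder, 1, 0.0, model, rep);

        // one buffer per worker thread; assumed signature knncreatebuffer(model, buf)
        knnbuffer buf;
        knncreatebuffer(model, buf);

        // thread-safe inference: the model is shared read-only,
        // all temporaries live in the per-thread buffer
        real_1d_array x = "[+1,0]";
        real_1d_array y = "[]";
        knntsprocess(model, buf, x, y);
        printf("%s\n", y.tostring(3).c_str());
        return 0;
    }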
     
    /************************************************************************* -This function fits five-parameter logistic (5PL) model to data provided -by user, subject to optional equality constraints on parameters A and D. -5PL model has following form: +This function unserializes data structure from string. +*************************************************************************/ +
    void knnunserialize(const std::string &s_in, knnmodel &obj); +void knnunserialize(const std::istream &s_in, knnmodel &obj); +
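A minimal sketch (not from the upstream manual) of a serialize/unserialize round trip using the string overloads above; the model is trained as in the classification example below, and both copies are queried to show that they agree.

    #include "stdafx.h"
    #include <stdio.h>
    #include <string>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // train a small classifier (same toy dataset as the example below)
        knnbuilder builder;
        knnmodel model, model2;
        knnreport rep;
        real_2d_array xy = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
        knnbuildercreate(builder);
        knnbuildersetdatasetcls(builder, xy, 4, 2, 2);
        knnbuilderbuildknnmodel(builder, 1, 0.0, model, rep);

        // serialize to a portable string and restore into a fresh object
        std::string s;
        knnserialize(model, s);
        knnunserialize(s, model2);

        // both models should return the same estimate
        real_1d_array x = "[+1,0]";
        printf("%.3f %.3f\n", double(knnprocess0(model, x)), double(knnprocess0(model2, x)));
        return 0;
    }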
    + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "dataanalysis.h"
     
    -    F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G)
    +using namespace alglib;
     
    -Here:
    -    * A, D - with optional equality constraints
    -    * B - unconstrained
    -    * C>0
    -    * G>0
     
    -IMPORTANT: unlike in  4PL  fitting,  output  of  this  function   is   NOT
    -           constrained in  such  way that B is guaranteed to be  positive.
    -           Furthermore,  unlike  4PL,  5PL  model  is  NOT  symmetric with
    -           respect to B, so you can NOT transform model to equivalent one,
    -           with B having desired sign (>0 or <0).
    +int main(int argc, char **argv)
    +{
    +    //
    +    // The very simple classification example: classify points (x,y) in 2D space
    +    // as ones with x>=0 and ones with x<0 (y is ignored, but our classifier
+    // has to find this out by itself).
    +    //
    +    // First, we have to create KNN builder object, load dataset and specify
+    // training settings. Our dataset is specified as a matrix, which has the following
    +    // format:
    +    //
    +    //     x0 y0 class0
    +    //     x1 y1 class1
    +    //     x2 y2 class2
    +    //     ....
    +    //
    +    // Here xi and yi can be any values (and in fact you can have any number of
+    // independent variables), and classi MUST be an integer in the [0,NClasses)
    +    // range. In our example we denote points with x>=0 as class #0, and
    +    // ones with negative xi as class #1.
    +    //
    +    // NOTE: if you want to solve regression problem, specify dataset in similar
    +    //       format, but with dependent variable(s) instead of class labels. You
    +    //       can have dataset with multiple dependent variables, by the way!
    +    //
    +    // For the sake of simplicity, our example includes only 4-point dataset and
    +    // really simple K=1 nearest neighbor search. Industrial problems typically
    +    // need larger values of K.
    +    //
    +    knnbuilder builder;
    +    ae_int_t nvars = 2;
    +    ae_int_t nclasses = 2;
    +    ae_int_t npoints = 4;
    +    real_2d_array xy = "[[1,1,0],[1,-1,0],[-1,1,1],[-1,-1,1]]";
     
    -5PL fitting is implemented as follows:
    -* we perform small number of restarts from random locations which helps to
    -  solve problem of bad local extrema. Locations are only partially  random
    -  - we use input data to determine good  initial  guess,  but  we  include
    -  controlled amount of randomness.
    -* we perform Levenberg-Marquardt fitting with very  tight  constraints  on
    -  parameters B and C - it allows us to find good  initial  guess  for  the
    -  second stage without risk of running into "flat spot".  Parameter  G  is
    -  fixed at G=1.
    -* second  Levenberg-Marquardt  round  is   performed   without   excessive
    -  constraints on B and C, but with G still equal to 1.  Results  from  the
    -  previous round are used as initial guess.
    -* third Levenberg-Marquardt round relaxes constraints on G  and  tries  two
    -  different models - one with B>0 and one with B<0.
    -* after fitting is done, we compare results with best values found so far,
    -  rewrite "best solution" if needed, and move to next random location.
    +    knnbuildercreate(builder);
    +    knnbuildersetdatasetcls(builder, xy, npoints, nvars, nclasses);
     
    -Overall algorithm is very stable and is not prone to  bad  local  extrema.
    -Furthermore, it automatically scales when input data have  very  large  or
    -very small range.
    +    // we build KNN model with k=1 and eps=0 (exact k-nn search is performed)
    +    ae_int_t k = 1;
    +    double eps = 0;
    +    knnmodel model;
    +    knnreport rep;
    +    knnbuilderbuildknnmodel(builder, k, eps, model, rep);
    +
    +    // with such settings (k=1 is used) you can expect zero classification
    +    // error on training set. Beautiful results, but remember - in real life
    +    // you do not need zero TRAINING SET error, you need good generalization.
     
    -INPUT PARAMETERS:
    -    X       -   array[N], stores X-values.
    -                MUST include only non-negative numbers  (but  may  include
    -                zero values). Can be unsorted.
    -    Y       -   array[N], values to fit.
    -    N       -   number of points. If N is less than  length  of  X/Y, only
    -                leading N elements are used.
    -    CnstrLeft-  optional equality constraint for model value at the   left
    -                boundary (at X=0). Specify NAN (Not-a-Number)  if  you  do
    -                not need constraint on the model value at X=0 (in C++  you
    -                can pass alglib::fp_nan as parameter, in  C#  it  will  be
    -                Double.NaN).
    -                See  below,  section  "EQUALITY  CONSTRAINTS"   for   more
    -                information about constraints.
    -    CnstrRight- optional equality constraint for model value at X=infinity.
    -                Specify NAN (Not-a-Number) if you do not  need  constraint
    -                on the model value (in C++  you can pass alglib::fp_nan as
    -                parameter, in  C# it will  be Double.NaN).
    -                See  below,  section  "EQUALITY  CONSTRAINTS"   for   more
    -                information about constraints.
    +    printf("%.4f\n", double(rep.relclserror)); // EXPECTED: 0.0000
     
    -OUTPUT PARAMETERS:
    -    A,B,C,D,G-  parameters of 5PL model
    -    Rep     -   fitting report. This structure has many fields,  but  ONLY
    -                ONES LISTED BELOW ARE SET:
    -                * Rep.IterationsCount - number of iterations performed
    -                * Rep.RMSError - root-mean-square error
    -                * Rep.AvgError - average absolute error
    -                * Rep.AvgRelError - average relative error (calculated for
    -                  non-zero Y-values)
    -                * Rep.MaxError - maximum absolute error
    -                * Rep.R2 - coefficient of determination,  R-squared.  This
    -                  coefficient   is  calculated  as  R2=1-RSS/TSS  (in case
    -                  of nonlinear  regression  there  are  multiple  ways  to
    -                  define R2, each of them giving different results).
    +    // now, let's perform some simple processing with knnprocess()
    +    real_1d_array x = "[+1,0]";
    +    real_1d_array y = "[]";
    +    knnprocess(model, x, y);
    +    printf("%s\n", y.tostring(3).c_str()); // EXPECTED: [+1,0]
     
    -NOTE: after  you  obtained  coefficients,  you  can  evaluate  model  with
    -      LogisticCalc5() function.
+    // another option is to use knnprocess0(), which returns just the first component
+    // of the output vector y. It is ideal for regression problems and binary classifiers.
    +    double y0;
    +    y0 = knnprocess0(model, x);
    +    printf("%.3f\n", double(y0)); // EXPECTED: 1.000
     
    -NOTE: if you need better control over fitting process than provided by this
    -      function, you may use LogisticFit45X().
    +    // finally, you can use knnclassify() which returns most probable class index (i.e. argmax y[i]).
    +    ae_int_t i;
    +    i = knnclassify(model, x);
    +    printf("%d\n", int(i)); // EXPECTED: 0
    +    return 0;
    +}
     
    -NOTE: step is automatically scaled according to scale of parameters  being
    -      fitted before we compare its length with EpsX. Thus,  this  function
    -      can be used to fit data with very small or very large values without
    -      changing EpsX.
     
    -EQUALITY CONSTRAINTS ON PARAMETERS
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "dataanalysis.h"
     
    -5PL solver supports equality constraints on model  values  at   the   left
    -boundary (X=0) and right  boundary  (X=infinity).  These  constraints  are
    -completely optional and you can specify both of them, only  one  -  or  no
    -constraints at all.
    +using namespace alglib;
     
    -Parameter  CnstrLeft  contains  left  constraint (or NAN for unconstrained
    -fitting), and CnstrRight contains right  one.
     
    -Unlike 4PL one, 5PL model is NOT symmetric with respect to  change in sign
    -of B. Thus, negative B's are possible, and left constraint  may  constrain
    -parameter A (for positive B's)  -  or  parameter  D  (for  negative  B's).
    -Similarly changes meaning of right constraint.
    +int main(int argc, char **argv)
    +{
    +    //
    +    // The very simple regression example: model f(x,y)=x+y
    +    //
    +    // First, we have to create KNN builder object, load dataset and specify
+    // training settings. Our dataset is specified as a matrix, which has the following
    +    // format:
    +    //
    +    //     x0 y0 f0
    +    //     x1 y1 f1
    +    //     x2 y2 f2
    +    //     ....
    +    //
    +    // Here xi and yi can be any values, and fi is a dependent function value.
    +    // By the way, with KNN algorithm you can even model functions with multiple
    +    // dependent variables!
    +    //
    +    // NOTE: you can also solve classification problems with KNN models, see
    +    //       another example for this unit.
    +    //
    +    // For the sake of simplicity, our example includes only 4-point dataset and
    +    // really simple K=1 nearest neighbor search. Industrial problems typically
    +    // need larger values of K.
    +    //
    +    knnbuilder builder;
    +    ae_int_t nvars = 2;
    +    ae_int_t nout = 1;
    +    ae_int_t npoints = 4;
    +    real_2d_array xy = "[[1,1,+2],[1,-1,0],[-1,1,0],[-1,-1,-2]]";
     
    -You do not have to decide what parameter to  constrain  -  algorithm  will
    -automatically determine correct parameters as fitting progresses. However,
    -question highlighted above is important when you interpret fitting results.
    +    knnbuildercreate(builder);
    +    knnbuildersetdatasetreg(builder, xy, npoints, nvars, nout);
     
    +    // we build KNN model with k=1 and eps=0 (exact k-nn search is performed)
    +    ae_int_t k = 1;
    +    double eps = 0;
    +    knnmodel model;
    +    knnreport rep;
    +    knnbuilderbuildknnmodel(builder, k, eps, model, rep);
    +
    +    // with such settings (k=1 is used) you can expect zero RMS error on the
    +    // training set. Beautiful results, but remember - in real life you do not
    +    // need zero TRAINING SET error, you need good generalization.
     
    -  -- ALGLIB PROJECT --
    -     Copyright 14.02.2014 by Bochkanov Sergey
    +    printf("%.4f\n", double(rep.rmserror)); // EXPECTED: 0.0000
    +
    +    // now, let's perform some simple processing with knnprocess()
    +    real_1d_array x = "[+1,+1]";
    +    real_1d_array y = "[]";
    +    knnprocess(model, x, y);
    +    printf("%s\n", y.tostring(3).c_str()); // EXPECTED: [+2]
    +
+    // another option is to use knnprocess0(), which returns just the first component
+    // of the output vector y. It is ideal for regression problems and binary classifiers.
    +    double y0;
    +    y0 = knnprocess0(model, x);
    +    printf("%.3f\n", double(y0)); // EXPECTED: 2.000
    +
+    // there also exists another convenience function, knnclassify(),
    +    // but it does not work for regression problems - it always returns -1.
    +    ae_int_t i;
    +    i = knnclassify(model, x);
    +    printf("%d\n", int(i)); // EXPECTED: -1
    +    return 0;
    +}
    +
    +
    +
    + + +
    +
    /************************************************************************* +Calculation of the value of the Laguerre polynomial. + +Parameters: + n - degree, n>=0 + x - argument + +Result: + the value of the Laguerre polynomial Ln at x *************************************************************************/ -
    void alglib::logisticfit5ec( - real_1d_array x, - real_1d_array y, +
    double alglib::laguerrecalculate( ae_int_t n, - double cnstrleft, - double cnstrright, - double& a, - double& b, - double& c, - double& d, - double& g, - lsfitreport& rep); + double x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Nonlinear least squares fitting using function values only. - -Combination of numerical differentiation and secant updates is used to -obtain function Jacobian. - -Nonlinear task min(F(c)) is solved, where +Representation of Ln as C[0] + C[1]*X + ... + C[N]*X^N - F(c) = (f(c,x[0])-y[0])^2 + ... + (f(c,x[n-1])-y[n-1])^2, +Input parameters: + N - polynomial degree, n>=0 - * N is a number of points, - * M is a dimension of a space points belong to, - * K is a dimension of a space of parameters being fitted, - * w is an N-dimensional vector of weight coefficients, - * x is a set of N points, each of them is an M-dimensional vector, - * c is a K-dimensional vector of parameters being fitted +Output parameters: + C - coefficients +*************************************************************************/ +
    void alglib::laguerrecoefficients( + ae_int_t n, + real_1d_array& c, + const xparams _params = alglib::xdefault); -This subroutine uses only f(c,x[i]). +
    + +
    +
    /************************************************************************* +Summation of Laguerre polynomials using Clenshaw's recurrence formula. -INPUT PARAMETERS: - X - array[0..N-1,0..M-1], points (one row = one point) - Y - array[0..N-1], function values. - C - array[0..K-1], initial approximation to the solution, - N - number of points, N>1 - M - dimension of space - K - number of parameters being fitted - DiffStep- numerical differentiation step; - should not be very small or large; - large = loss of accuracy - small = growth of round-off errors +This routine calculates c[0]*L0(x) + c[1]*L1(x) + ... + c[N]*LN(x) -OUTPUT PARAMETERS: - State - structure which stores algorithm state +Parameters: + n - degree, n>=0 + x - argument - -- ALGLIB -- - Copyright 18.10.2008 by Bochkanov Sergey +Result: + the value of the Laguerre polynomial at x *************************************************************************/ -
    void alglib::lsfitcreatef( - real_2d_array x, - real_1d_array y, - real_1d_array c, - double diffstep, - lsfitstate& state); -void alglib::lsfitcreatef( - real_2d_array x, - real_1d_array y, +
    double alglib::laguerresum( real_1d_array c, ae_int_t n, - ae_int_t m, - ae_int_t k, - double diffstep, - lsfitstate& state); + double x, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - + +
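A minimal sketch (not from the upstream manual) tying the three Laguerre routines above together for n=2, x=0.5, where L2(x) = 1 - 2x + x^2/2 so L2(0.5) = 0.125; the specialfunctions.h header is an assumption.

    #include "stdafx.h"
    #include <stdio.h>
    #include "specialfunctions.h"

    using namespace alglib;

    int main()
    {
        ae_int_t n = 2;
        double x = 0.5;

        // direct evaluation: L2(0.5) = 0.125
        printf("%.3f\n", double(laguerrecalculate(n, x)));

        // power-basis coefficients of L2: c[0] + c[1]*x + c[2]*x^2
        real_1d_array c;
        laguerrecoefficients(n, c);
        printf("%s\n", c.tostring(3).c_str());

        // Clenshaw summation of 1*L0(x) + 2*L1(x) + 3*L2(x) = 1 + 1 + 0.375 = 2.375
        real_1d_array w = "[1,2,3]";
        printf("%.3f\n", double(laguerresum(w, n, x)));
        return 0;
    }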
    + +fisherlda
    +fisherldan
    + + +
    +
     
    /************************************************************************* -Nonlinear least squares fitting using gradient only, without individual -weights. - -Nonlinear task min(F(c)) is solved, where +Multiclass Fisher LDA - F(c) = ((f(c,x[0])-y[0]))^2 + ... + ((f(c,x[n-1])-y[n-1]))^2, +Subroutine finds coefficients of linear combination which optimally separates +training set on classes. - * N is a number of points, - * M is a dimension of a space points belong to, - * K is a dimension of a space of parameters being fitted, - * x is a set of N points, each of them is an M-dimensional vector, - * c is a K-dimensional vector of parameters being fitted +COMMERCIAL EDITION OF ALGLIB: -This subroutine uses only f(c,x[i]) and its gradient. + ! Commercial version of ALGLIB includes two important improvements of + ! this function, which can be used from C++ and C#: + ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) + ! * multithreading support + ! + ! Intel MKL gives approximately constant (with respect to number of + ! worker threads) acceleration factor which depends on CPU being used, + ! problem size and "baseline" ALGLIB edition which is used for + ! comparison. Best results are achieved for high-dimensional problems + ! (NVars is at least 256). + ! + ! Multithreading is used to accelerate initial phase of LDA, which + ! includes calculation of products of large matrices. Again, for best + ! efficiency problem must be high-dimensional. + ! + ! Generally, commercial ALGLIB is several times faster than open-source + ! generic C edition, and many times faster than open-source C# edition. + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - X - array[0..N-1,0..M-1], points (one row = one point) - Y - array[0..N-1], function values. - C - array[0..K-1], initial approximation to the solution, - N - number of points, N>1 - M - dimension of space - K - number of parameters being fitted - CheapFG - boolean flag, which is: - * True if both function and gradient calculation complexity - are less than O(M^2). An improved algorithm can - be used which corresponds to FGJ scheme from - MINLM unit. - * False otherwise. - Standard Jacibian-bases Levenberg-Marquardt algo - will be used (FJ scheme). + XY - training set, array[0..NPoints-1,0..NVars]. + First NVars columns store values of independent + variables, next column stores number of class (from 0 + to NClasses-1) which dataset element belongs to. Fractional + values are rounded to nearest integer. + NPoints - training set size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + NClasses - number of classes, NClasses>=2 + OUTPUT PARAMETERS: - State - structure which stores algorithm state + Info - return code: + * -4, if internal EVD subroutine hasn't converged + * -2, if there is a point with class number + outside of [0..NClasses-1]. + * -1, if incorrect parameters was passed (NPoints<0, + NVars<1, NClasses<2) + * 1, if task has been solved + * 2, if there was a multicollinearity in training set, + but task has been solved. + W - linear combination coefficients, array[0..NVars-1] -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey + Copyright 31.05.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lsfitcreatefg( - real_2d_array x, - real_1d_array y, - real_1d_array c, - bool cheapfg, - lsfitstate& state); -void alglib::lsfitcreatefg( - real_2d_array x, - real_1d_array y, - real_1d_array c, - ae_int_t n, - ae_int_t m, - ae_int_t k, - bool cheapfg, - lsfitstate& state); +
    void alglib::fisherlda( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t nclasses, + ae_int_t& info, + real_1d_array& w, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
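A minimal sketch (not from the upstream manual) of fisherlda() on a toy 2D dataset whose two classes are separated along the first coordinate, so the returned direction should be dominated by that coordinate; the row format and output codes follow the description above.

    #include "stdafx.h"
    #include <stdio.h>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // 6 points, 2 independent variables, class label in the last column
        real_2d_array xy = "[[1.0,0.1,0],[1.2,-0.2,0],[0.9,0.3,0],[-1.1,0.2,1],[-0.9,-0.1,1],[-1.0,0.0,1]]";
        ae_int_t info;
        real_1d_array w;
        fisherlda(xy, 6, 2, 2, info, w);

        // info=1 means the task was solved; w holds the separating direction
        printf("%d\n", int(info));
        printf("%s\n", w.tostring(2).c_str());
        return 0;
    }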
     
    /************************************************************************* -Nonlinear least squares fitting using gradient/Hessian, without individial -weights. - -Nonlinear task min(F(c)) is solved, where - - F(c) = ((f(c,x[0])-y[0]))^2 + ... + ((f(c,x[n-1])-y[n-1]))^2, +N-dimensional multiclass Fisher LDA - * N is a number of points, - * M is a dimension of a space points belong to, - * K is a dimension of a space of parameters being fitted, - * x is a set of N points, each of them is an M-dimensional vector, - * c is a K-dimensional vector of parameters being fitted +Subroutine finds coefficients of linear combinations which optimally separates +training set on classes. It returns N-dimensional basis whose vector are sorted +by quality of training set separation (in descending order). -This subroutine uses f(c,x[i]), its gradient and its Hessian. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - X - array[0..N-1,0..M-1], points (one row = one point) - Y - array[0..N-1], function values. - C - array[0..K-1], initial approximation to the solution, - N - number of points, N>1 - M - dimension of space - K - number of parameters being fitted + XY - training set, array[0..NPoints-1,0..NVars]. + First NVars columns store values of independent + variables, next column stores number of class (from 0 + to NClasses-1) which dataset element belongs to. Fractional + values are rounded to nearest integer. + NPoints - training set size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + NClasses - number of classes, NClasses>=2 -OUTPUT PARAMETERS: - State - structure which stores algorithm state +OUTPUT PARAMETERS: + Info - return code: + * -4, if internal EVD subroutine hasn't converged + * -2, if there is a point with class number + outside of [0..NClasses-1]. + * -1, if incorrect parameters was passed (NPoints<0, + NVars<1, NClasses<2) + * 1, if task has been solved + * 2, if there was a multicollinearity in training set, + but task has been solved. + W - basis, array[0..NVars-1,0..NVars-1] + columns of matrix stores basis vectors, sorted by + quality of training set separation (in descending order) -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey + Copyright 31.05.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lsfitcreatefgh( - real_2d_array x, - real_1d_array y, - real_1d_array c, - lsfitstate& state); -void alglib::lsfitcreatefgh( - real_2d_array x, - real_1d_array y, - real_1d_array c, - ae_int_t n, - ae_int_t m, - ae_int_t k, - lsfitstate& state); +
    void alglib::fisherldan( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t nclasses, + ae_int_t& info, + real_2d_array& w, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - + + +
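The same toy dataset passed through the N-dimensional variant; as described above, columns of the returned matrix are basis vectors sorted by separation quality, so the first column plays the role of the single direction returned by fisherlda() (up to sign and normalization). Again, this sketch is an editorial illustration, not upstream example code.

    #include "stdafx.h"
    #include <stdio.h>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[1.0,0.1,0],[1.2,-0.2,0],[0.9,0.3,0],[-1.1,0.2,1],[-0.9,-0.1,1],[-1.0,0.0,1]]";
        ae_int_t info;
        real_2d_array w;
        fisherldan(xy, 6, 2, 2, info, w);

        // w is NVars x NVars; column 0 is the best separating direction
        printf("%d\n", int(info));
        printf("%s\n", w.tostring(2).c_str());
        return 0;
    }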
     
    /************************************************************************* -Weighted nonlinear least squares fitting using function values only. +Calculation of the value of the Legendre polynomial Pn. -Combination of numerical differentiation and secant updates is used to -obtain function Jacobian. +Parameters: + n - degree, n>=0 + x - argument -Nonlinear task min(F(c)) is solved, where +Result: + the value of the Legendre polynomial Pn at x +*************************************************************************/ +
    double alglib::legendrecalculate( + ae_int_t n, + double x, + const xparams _params = alglib::xdefault); - F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, +
    + +
    +
    /************************************************************************* +Representation of Pn as C[0] + C[1]*X + ... + C[N]*X^N - * N is a number of points, - * M is a dimension of a space points belong to, - * K is a dimension of a space of parameters being fitted, - * w is an N-dimensional vector of weight coefficients, - * x is a set of N points, each of them is an M-dimensional vector, - * c is a K-dimensional vector of parameters being fitted +Input parameters: + N - polynomial degree, n>=0 -This subroutine uses only f(c,x[i]). +Output parameters: + C - coefficients +*************************************************************************/ +
    void alglib::legendrecoefficients( + ae_int_t n, + real_1d_array& c, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS: - X - array[0..N-1,0..M-1], points (one row = one point) - Y - array[0..N-1], function values. - W - weights, array[0..N-1] - C - array[0..K-1], initial approximation to the solution, - N - number of points, N>1 - M - dimension of space - K - number of parameters being fitted - DiffStep- numerical differentiation step; - should not be very small or large; - large = loss of accuracy - small = growth of round-off errors +
    + +
    +
    /************************************************************************* +Summation of Legendre polynomials using Clenshaw's recurrence formula. -OUTPUT PARAMETERS: - State - structure which stores algorithm state +This routine calculates + c[0]*P0(x) + c[1]*P1(x) + ... + c[N]*PN(x) - -- ALGLIB -- - Copyright 18.10.2008 by Bochkanov Sergey +Parameters: + n - degree, n>=0 + x - argument + +Result: + the value of the Legendre polynomial at x *************************************************************************/ -
    void alglib::lsfitcreatewf( - real_2d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array c, - double diffstep, - lsfitstate& state); -void alglib::lsfitcreatewf( - real_2d_array x, - real_1d_array y, - real_1d_array w, +
    double alglib::legendresum( real_1d_array c, ae_int_t n, - ae_int_t m, - ae_int_t k, - double diffstep, - lsfitstate& state); + double x, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - + + +
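A minimal sketch for the Legendre routines above, mirroring the Laguerre sketch: n=2, x=0.5, where P2(x) = (3x^2 - 1)/2 = -0.125; the specialfunctions.h header is again an assumption.

    #include "stdafx.h"
    #include <stdio.h>
    #include "specialfunctions.h"

    using namespace alglib;

    int main()
    {
        ae_int_t n = 2;
        double x = 0.5;

        // direct evaluation: P2(0.5) = -0.125
        printf("%.3f\n", double(legendrecalculate(n, x)));

        // power-basis coefficients of P2: -0.5 + 0*x + 1.5*x^2
        real_1d_array c;
        legendrecoefficients(n, c);
        printf("%s\n", c.tostring(3).c_str());

        // Clenshaw summation of 1*P0(x) + 2*P1(x) + 3*P2(x) = 1 + 1 - 0.375 = 1.625
        real_1d_array w = "[1,2,3]";
        printf("%.3f\n", double(legendresum(w, n, x)));
        return 0;
    }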
     
    /************************************************************************* -Weighted nonlinear least squares fitting using gradient only. -Nonlinear task min(F(c)) is solved, where +*************************************************************************/ +
    class lincgreport +{ + ae_int_t iterationscount; + ae_int_t nmv; + ae_int_t terminationtype; + double r2; +}; - F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, +
    + +
    +
    /************************************************************************* +This object stores state of the linear CG method. - * N is a number of points, - * M is a dimension of a space points belong to, - * K is a dimension of a space of parameters being fitted, - * w is an N-dimensional vector of weight coefficients, - * x is a set of N points, each of them is an M-dimensional vector, - * c is a K-dimensional vector of parameters being fitted +You should use ALGLIB functions to work with this object. +Never try to access its fields directly! +*************************************************************************/ +
    class lincgstate +{ +}; -This subroutine uses only f(c,x[i]) and its gradient. +
    + +
    +
    /************************************************************************* +This function initializes linear CG Solver. This solver is used to solve +symmetric positive definite problems. If you want to solve nonsymmetric +(or non-positive definite) problem you may use LinLSQR solver provided by +ALGLIB. + +USAGE: +1. User initializes algorithm state with LinCGCreate() call +2. User tunes solver parameters with LinCGSetCond() and other functions +3. Optionally, user sets starting point with LinCGSetStartingPoint() +4. User calls LinCGSolveSparse() function which takes algorithm state and + SparseMatrix object. +5. User calls LinCGResults() to get solution +6. Optionally, user may call LinCGSolveSparse() again to solve another + problem with different matrix and/or right part without reinitializing + LinCGState structure. INPUT PARAMETERS: - X - array[0..N-1,0..M-1], points (one row = one point) - Y - array[0..N-1], function values. - W - weights, array[0..N-1] - C - array[0..K-1], initial approximation to the solution, - N - number of points, N>1 - M - dimension of space - K - number of parameters being fitted - CheapFG - boolean flag, which is: - * True if both function and gradient calculation complexity - are less than O(M^2). An improved algorithm can - be used which corresponds to FGJ scheme from - MINLM unit. - * False otherwise. - Standard Jacibian-bases Levenberg-Marquardt algo - will be used (FJ scheme). + N - problem dimension, N>0 OUTPUT PARAMETERS: State - structure which stores algorithm state -See also: - LSFitResults - LSFitCreateFG (fitting without weights) - LSFitCreateWFGH (fitting using Hessian) - LSFitCreateFGH (fitting using Hessian, without weights) - -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey + Copyright 14.11.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lsfitcreatewfg( - real_2d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array c, - bool cheapfg, - lsfitstate& state); -void alglib::lsfitcreatewfg( - real_2d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array c, +
    void alglib::lincgcreate( ae_int_t n, - ae_int_t m, - ae_int_t k, - bool cheapfg, - lsfitstate& state); + lincgstate& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -Weighted nonlinear least squares fitting using gradient/Hessian. +CG-solver: results. -Nonlinear task min(F(c)) is solved, where +This function must be called after LinCGSolve - F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, +INPUT PARAMETERS: + State - algorithm state - * N is a number of points, - * M is a dimension of a space points belong to, - * K is a dimension of a space of parameters being fitted, - * w is an N-dimensional vector of weight coefficients, - * x is a set of N points, each of them is an M-dimensional vector, - * c is a K-dimensional vector of parameters being fitted +OUTPUT PARAMETERS: + X - array[N], solution + Rep - optimization report: + * Rep.TerminationType completetion code: + * -5 input matrix is either not positive definite, + too large or too small + * -4 overflow/underflow during solution + (ill conditioned problem) + * 1 ||residual||<=EpsF*||b|| + * 5 MaxIts steps was taken + * 7 rounding errors prevent further progress, + best point found is returned + * Rep.IterationsCount contains iterations count + * NMV countains number of matrix-vector calculations -This subroutine uses f(c,x[i]), its gradient and its Hessian. + -- ALGLIB -- + Copyright 14.11.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::lincgresults( + lincgstate state, + real_1d_array& x, + lincgreport& rep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
/************************************************************************* +This function sets stopping criteria. INPUT PARAMETERS: - X - array[0..N-1,0..M-1], points (one row = one point) - Y - array[0..N-1], function values. - W - weights, array[0..N-1] - C - array[0..K-1], initial approximation to the solution, - N - number of points, N>1 - M - dimension of space - K - number of parameters being fitted + EpsF - algorithm will be stopped if norm of residual is less than + EpsF*||b||. + MaxIts - algorithm will be stopped if number of iterations is more + than MaxIts. OUTPUT PARAMETERS: State - structure which stores algorithm state +NOTES: +If both EpsF and MaxIts are zero, then EpsF will be set to a small +default value. + -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey + Copyright 14.11.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lsfitcreatewfgh( - real_2d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array c, - lsfitstate& state); -void alglib::lsfitcreatewfgh( - real_2d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array c, - ae_int_t n, - ae_int_t m, - ae_int_t k, - lsfitstate& state); +
    void alglib::lincgsetcond( + lincgstate state, + double epsf, + ae_int_t maxits, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
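A minimal sketch (not from the upstream manual) showing the stopping criteria above, together with lincgsetstartingpoint() documented further down, applied to a tiny SPD system; the sparse-matrix setup follows the larger sparse-CG example later in this section.

    #include "stdafx.h"
    #include <stdio.h>
    #include "solvers.h"

    using namespace alglib;

    int main()
    {
        // 2x2 SPD system [[2,1],[1,3]]*x = [3,4] with exact solution [1,1]
        sparsematrix a;
        sparsecreate(2, 2, a);
        sparseset(a, 0, 0, 2.0);
        sparseset(a, 0, 1, 1.0);
        sparseset(a, 1, 0, 1.0);
        sparseset(a, 1, 1, 3.0);
        sparseconverttocrs(a);
        real_1d_array b = "[3,4]";

        lincgstate s;
        lincgreport rep;
        real_1d_array x;
        lincgcreate(2, s);

        // stop when ||residual|| <= 1e-8*||b|| or after at most 10 iterations
        lincgsetcond(s, 1.0e-8, 10);

        // start iterations from a user-supplied point instead of zero
        real_1d_array x0 = "[0.5,0.5]";
        lincgsetstartingpoint(s, x0);

        lincgsolvesparse(s, a, true, b);
        lincgresults(s, x, rep);
        printf("%d\n", int(rep.terminationtype));
        printf("%s\n", x.tostring(3).c_str());
        return 0;
    }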
     
    /************************************************************************* -This family of functions is used to launcn iterations of nonlinear fitter +This function changes preconditioning settings of LinCGSolveSparse() +function. LinCGSolveSparse() will use diagonal of the system matrix as +preconditioner. This preconditioning mode is active by default. -These functions accept following parameters: - state - algorithm state - func - callback which calculates function (or merit function) - value func at given point x - grad - callback which calculates function (or merit function) - value func and gradient grad at given point x - hess - callback which calculates function (or merit function) - value func, gradient grad and Hessian hess at given point x - rep - optional callback which is called after each iteration - can be NULL - ptr - optional pointer which is passed to func/grad/hess/jac/rep - can be NULL +INPUT PARAMETERS: + State - structure which stores algorithm state -NOTES: + -- ALGLIB -- + Copyright 19.11.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::lincgsetprecdiag( + lincgstate state, + const xparams _params = alglib::xdefault); -1. this algorithm is somewhat unusual because it works with parameterized - function f(C,X), where X is a function argument (we have many points - which are characterized by different argument values), and C is a - parameter to fit. +
    + +
    +
    /************************************************************************* +This function changes preconditioning settings of LinCGSolveSparse() +function. By default, SolveSparse() uses diagonal preconditioner, but if +you want to use solver without preconditioning, you can call this function +which forces solver to use unit matrix for preconditioning. - For example, if we want to do linear fit by f(c0,c1,x) = c0*x+c1, then - x will be argument, and {c0,c1} will be parameters. +INPUT PARAMETERS: + State - structure which stores algorithm state - It is important to understand that this algorithm finds minimum in the - space of function PARAMETERS (not arguments), so it needs derivatives - of f() with respect to C, not X. + -- ALGLIB -- + Copyright 19.11.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::lincgsetprecunit( + lincgstate state, + const xparams _params = alglib::xdefault); - In the example above it will need f=c0*x+c1 and {df/dc0,df/dc1} = {x,1} - instead of {df/dx} = {c0}. +
    + +
    +
    /************************************************************************* +This function sets restart frequency. By default, algorithm is restarted +after N subsequent iterations. -2. Callback functions accept C as the first parameter, and X as the second + -- ALGLIB -- + Copyright 14.11.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::lincgsetrestartfreq( + lincgstate state, + ae_int_t srf, + const xparams _params = alglib::xdefault); -3. If state was created with LSFitCreateFG(), algorithm needs just - function and its gradient, but if state was created with - LSFitCreateFGH(), algorithm will need function, gradient and Hessian. +
    + +
    +
    /************************************************************************* +This function sets frequency of residual recalculations. - According to the said above, there ase several versions of this - function, which accept different sets of callbacks. +Algorithm updates residual r_k using iterative formula, but recalculates +it from scratch after each 10 iterations. It is done to avoid accumulation +of numerical errors and to stop algorithm when r_k starts to grow. - This flexibility opens way to subtle errors - you may create state with - LSFitCreateFGH() (optimization using Hessian), but call function which - does not accept Hessian. So when algorithm will request Hessian, there - will be no callback to call. In this case exception will be thrown. +Such low update frequence (1/10) gives very little overhead, but makes +algorithm a bit more robust against numerical errors. However, you may +change it - Be careful to avoid such errors because there is no way to find them at - compile time - you can see them at runtime only. +INPUT PARAMETERS: + Freq - desired update frequency, Freq>=0. + Zero value means that no updates will be done. -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey + Copyright 14.11.2011 by Bochkanov Sergey *************************************************************************/ -
    void lsfitfit(lsfitstate &state, - void (*func)(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr), - void (*rep)(const real_1d_array &c, double func, void *ptr) = NULL, - void *ptr = NULL); -void lsfitfit(lsfitstate &state, - void (*func)(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr), - void (*grad)(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), - void (*rep)(const real_1d_array &c, double func, void *ptr) = NULL, - void *ptr = NULL); -void lsfitfit(lsfitstate &state, - void (*func)(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr), - void (*grad)(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), - void (*hess)(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr), - void (*rep)(const real_1d_array &c, double func, void *ptr) = NULL, - void *ptr = NULL); +
    void alglib::lincgsetrupdatefreq( + lincgstate state, + ae_int_t freq, + const xparams _params = alglib::xdefault); +
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +
     
    /************************************************************************* -Linear least squares fitting. +This function sets starting point. +By default, zero starting point is used. -QR decomposition is used to reduce task to MxM, then triangular solver or -SVD-based solver is used depending on condition number of the system. It -allows to maximize speed and retain decent accuracy. +INPUT PARAMETERS: + X - starting point, array[N] -IMPORTANT: if you want to perform polynomial fitting, it may be more - convenient to use PolynomialFit() function. This function gives - best results on polynomial problems and solves numerical - stability issues which arise when you fit high-degree - polynomials to your data. +OUTPUT PARAMETERS: + State - structure which stores algorithm state -COMMERCIAL EDITION OF ALGLIB: + -- ALGLIB -- + Copyright 14.11.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::lincgsetstartingpoint( + lincgstate state, + real_1d_array x, + const xparams _params = alglib::xdefault); - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +
    + +
    +
    /************************************************************************* +This function turns on/off reporting. INPUT PARAMETERS: - Y - array[0..N-1] Function values in N points. - FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. - FMatrix[I, J] - value of J-th basis function in I-th point. - N - number of points used. N>=1. - M - number of basis functions, M>=1. - -OUTPUT PARAMETERS: - Info - error code: - * -4 internal SVD decomposition subroutine failed (very - rare and for degenerate systems only) - * 1 task is solved - C - decomposition coefficients, array[0..M-1] - Rep - fitting report. Following fields are set: - * Rep.TaskRCond reciprocal of condition number - * R2 non-adjusted coefficient of determination - (non-weighted) - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED + State - structure which stores algorithm state + NeedXRep- whether iteration reports are needed or not -ERRORS IN PARAMETERS +If NeedXRep is True, algorithm will call rep() callback function if it is +provided to MinCGOptimize(). -This solver also calculates different kinds of errors in parameters and -fills corresponding fields of report: -* Rep.CovPar covariance matrix for parameters, array[K,K]. -* Rep.ErrPar errors in parameters, array[K], - errpar = sqrt(diag(CovPar)) -* Rep.ErrCurve vector of fit errors - standard deviations of empirical - best-fit curve from "ideal" best-fit curve built with - infinite number of samples, array[N]. - errcurve = sqrt(diag(F*CovPar*F')), - where F is functions matrix. -* Rep.Noise vector of per-point estimates of noise, array[N] + -- ALGLIB -- + Copyright 14.11.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::lincgsetxrep( + lincgstate state, + bool needxrep, + const xparams _params = alglib::xdefault); -NOTE: noise in the data is estimated as follows: - * for fitting without user-supplied weights all points are - assumed to have same level of noise, which is estimated from - the data - * for fitting with user-supplied weights we assume that noise - level in I-th point is inversely proportional to Ith weight. - Coefficient of proportionality is estimated from the data. +
    + +
    +
    /************************************************************************* +Procedure for solution of A*x=b with sparse A. -NOTE: we apply small amount of regularization when we invert squared - Jacobian and calculate covariance matrix. It guarantees that - algorithm won't divide by zero during inversion, but skews - error estimates a bit (fractional error is about 10^-9). +INPUT PARAMETERS: + State - algorithm state + A - sparse matrix in the CRS format (you MUST contvert it to + CRS format by calling SparseConvertToCRS() function). + IsUpper - whether upper or lower triangle of A is used: + * IsUpper=True => only upper triangle is used and lower + triangle is not referenced at all + * IsUpper=False => only lower triangle is used and upper + triangle is not referenced at all + B - right part, array[N] - However, we believe that this difference is insignificant for - all practical purposes except for the situation when you want - to compare ALGLIB results with "reference" implementation up - to the last significant digit. +RESULT: + This function returns no result. + You can get solution by calling LinCGResults() -NOTE: covariance matrix is estimated using correction for degrees - of freedom (covariances are divided by N-M instead of dividing - by N). +NOTE: this function uses lightweight preconditioning - multiplication by + inverse of diag(A). If you want, you can turn preconditioning off by + calling LinCGSetPrecUnit(). However, preconditioning cost is low and + preconditioner is very important for solution of badly scaled + problems. -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey + Copyright 14.11.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lsfitlinear( - real_1d_array y, - real_2d_array fmatrix, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::lsfitlinear( - real_1d_array y, - real_2d_array fmatrix, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::smp_lsfitlinear( - real_1d_array y, - real_2d_array fmatrix, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::smp_lsfitlinear( - real_1d_array y, - real_2d_array fmatrix, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); +
    void alglib::lincgsolvesparse( + lincgstate state, + sparsematrix a, + bool isupper, + real_1d_array b, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  

    +
    -
    /************************************************************************* -Constained linear least squares fitting. - -This is variation of LSFitLinear(), which searchs for min|A*x=b| given -that K additional constaints C*x=bc are satisfied. It reduces original -task to modified one: min|B*y-d| WITHOUT constraints, then LSFitLinear() -is called. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "solvers.h" -IMPORTANT: if you want to perform polynomial fitting, it may be more - convenient to use PolynomialFit() function. This function gives - best results on polynomial problems and solves numerical - stability issues which arise when you fit high-degree - polynomials to your data. +using namespace alglib; -COMMERCIAL EDITION OF ALGLIB: - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +int main(int argc, char **argv) +{ + // + // This example illustrates solution of sparse linear systems with + // conjugate gradient method. + // + // Suppose that we have linear system A*x=b with sparse symmetric + // positive definite A (represented by sparsematrix object) + // [ 5 1 ] + // [ 1 7 2 ] + // A = [ 2 8 1 ] + // [ 1 4 1 ] + // [ 1 4 ] + // and right part b + // [ 7 ] + // [ 17 ] + // b = [ 14 ] + // [ 10 ] + // [ 6 ] + // and we want to solve this system using sparse linear CG. In order + // to do so, we have to create left part (sparsematrix object) and + // right part (dense array). + // + // Initially, sparse matrix is created in the Hash-Table format, + // which allows easy initialization, but do not allow matrix to be + // used in the linear solvers. So after construction you should convert + // sparse matrix to CRS format (one suited for linear operations). + // + // It is important to note that in our example we initialize full + // matrix A, both lower and upper triangles. However, it is symmetric + // and sparse solver needs just one half of the matrix. So you may + // save about half of the space by filling only one of the triangles. + // + sparsematrix a; + sparsecreate(5, 5, a); + sparseset(a, 0, 0, 5.0); + sparseset(a, 0, 1, 1.0); + sparseset(a, 1, 0, 1.0); + sparseset(a, 1, 1, 7.0); + sparseset(a, 1, 2, 2.0); + sparseset(a, 2, 1, 2.0); + sparseset(a, 2, 2, 8.0); + sparseset(a, 2, 3, 1.0); + sparseset(a, 3, 2, 1.0); + sparseset(a, 3, 3, 4.0); + sparseset(a, 3, 4, 1.0); + sparseset(a, 4, 3, 1.0); + sparseset(a, 4, 4, 4.0); -INPUT PARAMETERS: - Y - array[0..N-1] Function values in N points. - FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. 
- FMatrix[I,J] - value of J-th basis function in I-th point. - CMatrix - a table of constaints, array[0..K-1,0..M]. - I-th row of CMatrix corresponds to I-th linear constraint: - CMatrix[I,0]*C[0] + ... + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M] - N - number of points used. N>=1. - M - number of basis functions, M>=1. - K - number of constraints, 0 <= K < M - K=0 corresponds to absence of constraints. + // + // Now our matrix is fully initialized, but we have to do one more + // step - convert it from Hash-Table format to CRS format (see + // documentation on sparse matrices for more information about these + // formats). + // + // If you omit this call, ALGLIB will generate exception on the first + // attempt to use A in linear operations. + // + sparseconverttocrs(a); -OUTPUT PARAMETERS: - Info - error code: - * -4 internal SVD decomposition subroutine failed (very - rare and for degenerate systems only) - * -3 either too many constraints (M or more), - degenerate constraints (some constraints are - repetead twice) or inconsistent constraints were - specified. - * 1 task is solved - C - decomposition coefficients, array[0..M-1] - Rep - fitting report. Following fields are set: - * R2 non-adjusted coefficient of determination - (non-weighted) - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED + // + // Initialization of the right part + // + real_1d_array b = "[7,17,14,10,6]"; -IMPORTANT: - this subroitine doesn't calculate task's condition number for K<>0. + // + // Now we have to create linear solver object and to use it for the + // solution of the linear system. + // + // NOTE: lincgsolvesparse() accepts additional parameter which tells + // what triangle of the symmetric matrix should be used - upper + // or lower. Because we've filled both parts of the matrix, we + // can use any part - upper or lower. + // + lincgstate s; + lincgreport rep; + real_1d_array x; + lincgcreate(5, s); + lincgsolvesparse(s, a, true, b); + lincgresults(s, x, rep); -ERRORS IN PARAMETERS + printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 + printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [1.000,2.000,1.000,2.000,1.000] + return 0; +} -This solver also calculates different kinds of errors in parameters and -fills corresponding fields of report: -* Rep.CovPar covariance matrix for parameters, array[K,K]. -* Rep.ErrPar errors in parameters, array[K], - errpar = sqrt(diag(CovPar)) -* Rep.ErrCurve vector of fit errors - standard deviations of empirical - best-fit curve from "ideal" best-fit curve built with - infinite number of samples, array[N]. - errcurve = sqrt(diag(F*CovPar*F')), - where F is functions matrix. -* Rep.Noise vector of per-point estimates of noise, array[N] -IMPORTANT: errors in parameters are calculated without taking into - account boundary/linear constraints! Presence of constraints - changes distribution of errors, but there is no easy way to - account for constraints when you calculate covariance matrix. +
    + + +
    +
    /************************************************************************* -NOTE: noise in the data is estimated as follows: - * for fitting without user-supplied weights all points are - assumed to have same level of noise, which is estimated from - the data - * for fitting with user-supplied weights we assume that noise - level in I-th point is inversely proportional to Ith weight. - Coefficient of proportionality is estimated from the data. +*************************************************************************/ +
    class linlsqrreport +{ + ae_int_t iterationscount; + ae_int_t nmv; + ae_int_t terminationtype; +}; -NOTE: we apply small amount of regularization when we invert squared - Jacobian and calculate covariance matrix. It guarantees that - algorithm won't divide by zero during inversion, but skews - error estimates a bit (fractional error is about 10^-9). +
    + +
    +
    /************************************************************************* +This object stores state of the LinLSQR method. - However, we believe that this difference is insignificant for - all practical purposes except for the situation when you want - to compare ALGLIB results with "reference" implementation up - to the last significant digit. +You should use ALGLIB functions to work with this object. +*************************************************************************/ +
    class linlsqrstate +{ +}; -NOTE: covariance matrix is estimated using correction for degrees - of freedom (covariances are divided by N-M instead of dividing - by N). +
    + +
    +
/*************************************************************************
+This function initializes linear LSQR Solver. This solver is used to solve
+non-symmetric (and, possibly, non-square) problems. Least squares solution
+is returned for incompatible systems.
+
+USAGE:
+1. User initializes algorithm state with LinLSQRCreate() call
+2. User tunes solver parameters with LinLSQRSetCond() and other functions
+3. User calls LinLSQRSolveSparse() function which takes algorithm state
+ and SparseMatrix object.
+4. User calls LinLSQRResults() to get solution
+5. Optionally, user may call LinLSQRSolveSparse() again to solve another
+ problem with different matrix and/or right part without reinitializing
+ LinLSQRState structure.
+
+INPUT PARAMETERS:
+ M - number of rows in A
+ N - number of variables, N>0
+
+OUTPUT PARAMETERS:
+ State - structure which stores algorithm state
+
+NOTE: see also linlsqrcreatebuf() for a version which reuses previously
+ allocated space as much as possible.
 -- ALGLIB --
- Copyright 07.09.2009 by Bochkanov Sergey
+ Copyright 30.11.2011 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::lsfitlinearc( - real_1d_array y, - real_2d_array fmatrix, - real_2d_array cmatrix, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::lsfitlinearc( - real_1d_array y, - real_2d_array fmatrix, - real_2d_array cmatrix, - ae_int_t n, +
    void alglib::linlsqrcreate( ae_int_t m, - ae_int_t k, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::smp_lsfitlinearc( - real_1d_array y, - real_2d_array fmatrix, - real_2d_array cmatrix, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::smp_lsfitlinearc( - real_1d_array y, - real_2d_array fmatrix, - real_2d_array cmatrix, ae_int_t n, - ae_int_t m, - ae_int_t k, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); + linlsqrstate& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  
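The USAGE list above maps onto a short call sequence. A minimal sketch (editor's illustration, not an upstream example; it assumes a sparsematrix a already converted to CRS, a right part b of length m, and problem size m x n):

    linlsqrstate s;
    linlsqrreport rep;
    real_1d_array x;
    linlsqrcreate(m, n, s);                // step 1: create solver state
    linlsqrsetcond(s, 1.0E-6, 1.0E-6, 0);  // step 2: tune stopping criteria (0 = default MaxIts)
    linlsqrsolvesparse(s, a, b);           // step 3: solve min|A*x-b| in the least squares sense
    linlsqrresults(s, x, rep);             // step 4: retrieve solution and report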

    +
     
    /************************************************************************* -Weighted linear least squares fitting. +This function initializes linear LSQR Solver. It provides exactly same +functionality as linlsqrcreate(), but reuses previously allocated space +as much as possible. -QR decomposition is used to reduce task to MxM, then triangular solver or -SVD-based solver is used depending on condition number of the system. It -allows to maximize speed and retain decent accuracy. +INPUT PARAMETERS: + M - number of rows in A + N - number of variables, N>0 -IMPORTANT: if you want to perform polynomial fitting, it may be more - convenient to use PolynomialFit() function. This function gives - best results on polynomial problems and solves numerical - stability issues which arise when you fit high-degree - polynomials to your data. +OUTPUT PARAMETERS: + State - structure which stores algorithm state -COMMERCIAL EDITION OF ALGLIB: + -- ALGLIB -- + Copyright 14.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::linlsqrcreatebuf( + ae_int_t m, + ae_int_t n, + linlsqrstate state, + const xparams _params = alglib::xdefault); - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +
    + +
    +
    /************************************************************************* +This function is used to peek into LSQR solver and get current iteration +counter. You can safely "peek" into the solver from another thread. INPUT PARAMETERS: - Y - array[0..N-1] Function values in N points. - W - array[0..N-1] Weights corresponding to function values. - Each summand in square sum of approximation deviations - from given values is multiplied by the square of - corresponding weight. - FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. - FMatrix[I, J] - value of J-th basis function in I-th point. - N - number of points used. N>=1. - M - number of basis functions, M>=1. + S - solver object -OUTPUT PARAMETERS: - Info - error code: - * -4 internal SVD decomposition subroutine failed (very - rare and for degenerate systems only) - * -1 incorrect N/M were specified - * 1 task is solved - C - decomposition coefficients, array[0..M-1] - Rep - fitting report. Following fields are set: - * Rep.TaskRCond reciprocal of condition number - * R2 non-adjusted coefficient of determination - (non-weighted) - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED +RESULT: + iteration counter, in [0,INF) -ERRORS IN PARAMETERS + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::linlsqrpeekiterationscount( + linlsqrstate s, + const xparams _params = alglib::xdefault); -This solver also calculates different kinds of errors in parameters and -fills corresponding fields of report: -* Rep.CovPar covariance matrix for parameters, array[K,K]. -* Rep.ErrPar errors in parameters, array[K], - errpar = sqrt(diag(CovPar)) -* Rep.ErrCurve vector of fit errors - standard deviations of empirical - best-fit curve from "ideal" best-fit curve built with - infinite number of samples, array[N]. - errcurve = sqrt(diag(F*CovPar*F')), - where F is functions matrix. -* Rep.Noise vector of per-point estimates of noise, array[N] +
    + +
    +
    /************************************************************************* +This subroutine submits request for termination of the running solver. It +can be called from some other thread which wants LSQR solver to terminate +(obviously, the thread running LSQR solver can not request termination +because it is already busy working on LSQR). -NOTE: noise in the data is estimated as follows: - * for fitting without user-supplied weights all points are - assumed to have same level of noise, which is estimated from - the data - * for fitting with user-supplied weights we assume that noise - level in I-th point is inversely proportional to Ith weight. - Coefficient of proportionality is estimated from the data. +As result, solver stops at point which was "current accepted" when +termination request was submitted and returns error code 8 (successful +termination). Such termination is a smooth process which properly +deallocates all temporaries. -NOTE: we apply small amount of regularization when we invert squared - Jacobian and calculate covariance matrix. It guarantees that - algorithm won't divide by zero during inversion, but skews - error estimates a bit (fractional error is about 10^-9). +INPUT PARAMETERS: + State - solver structure - However, we believe that this difference is insignificant for - all practical purposes except for the situation when you want - to compare ALGLIB results with "reference" implementation up - to the last significant digit. +NOTE: calling this function on solver which is NOT running will have no + effect. -NOTE: covariance matrix is estimated using correction for degrees - of freedom (covariances are divided by N-M instead of dividing - by N). +NOTE: multiple calls to this function are possible. First call is counted, + subsequent calls are silently ignored. + +NOTE: solver clears termination flag on its start, it means that if some + other thread will request termination too soon, its request will went + unnoticed. -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey + Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lsfitlinearw( - real_1d_array y, - real_1d_array w, - real_2d_array fmatrix, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::lsfitlinearw( - real_1d_array y, - real_1d_array w, - real_2d_array fmatrix, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::smp_lsfitlinearw( - real_1d_array y, - real_1d_array w, - real_2d_array fmatrix, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::smp_lsfitlinearw( - real_1d_array y, - real_1d_array w, - real_2d_array fmatrix, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); +
    void alglib::linlsqrrequesttermination( + linlsqrstate state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  
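Because linlsqrpeekiterationscount() and linlsqrrequesttermination() may be called from another thread, a long run can be monitored and stopped externally. A hedged sketch (editor's illustration, assuming the main thread is inside linlsqrsolvesparse(s, a, b) and that the usual example headers plus <thread> and <chrono> are included):

    // watcher thread: report progress once, then request a graceful stop;
    // the main thread will later see terminationtype=8 in linlsqrresults()
    std::thread watcher([&s]() {
        std::this_thread::sleep_for(std::chrono::seconds(10));
        printf("iterations so far: %d\n", int(linlsqrpeekiterationscount(s)));
        linlsqrrequesttermination(s);
    });
    // ... main thread returns from linlsqrsolvesparse() ...
    watcher.join();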

    - +
     
    /************************************************************************* -Weighted constained linear least squares fitting. +LSQR solver: results. -This is variation of LSFitLinearW(), which searchs for min|A*x=b| given -that K additional constaints C*x=bc are satisfied. It reduces original -task to modified one: min|B*y-d| WITHOUT constraints, then LSFitLinearW() -is called. +This function must be called after LinLSQRSolve -IMPORTANT: if you want to perform polynomial fitting, it may be more - convenient to use PolynomialFit() function. This function gives - best results on polynomial problems and solves numerical - stability issues which arise when you fit high-degree - polynomials to your data. +INPUT PARAMETERS: + State - algorithm state -COMMERCIAL EDITION OF ALGLIB: +OUTPUT PARAMETERS: + X - array[N], solution + Rep - optimization report: + * Rep.TerminationType completetion code: + * 1 ||Rk||<=EpsB*||B|| + * 4 ||A^T*Rk||/(||A||*||Rk||)<=EpsA + * 5 MaxIts steps was taken + * 7 rounding errors prevent further progress, + X contains best point found so far. + (sometimes returned on singular systems) + * 8 user requested termination via calling + linlsqrrequesttermination() + * Rep.IterationsCount contains iterations count + * NMV countains number of matrix-vector calculations - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. + -- ALGLIB -- + Copyright 30.11.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::linlsqrresults( + linlsqrstate state, + real_1d_array& x, + linlsqrreport& rep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This function sets stopping criteria. INPUT PARAMETERS: - Y - array[0..N-1] Function values in N points. - W - array[0..N-1] Weights corresponding to function values. - Each summand in square sum of approximation deviations - from given values is multiplied by the square of - corresponding weight. - FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. - FMatrix[I,J] - value of J-th basis function in I-th point. - CMatrix - a table of constaints, array[0..K-1,0..M]. - I-th row of CMatrix corresponds to I-th linear constraint: - CMatrix[I,0]*C[0] + ... + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M] - N - number of points used. N>=1. - M - number of basis functions, M>=1. - K - number of constraints, 0 <= K < M - K=0 corresponds to absence of constraints. + EpsA - algorithm will be stopped if ||A^T*Rk||/(||A||*||Rk||)<=EpsA. + EpsB - algorithm will be stopped if ||Rk||<=EpsB*||B|| + MaxIts - algorithm will be stopped if number of iterations + more than MaxIts. OUTPUT PARAMETERS: - Info - error code: - * -4 internal SVD decomposition subroutine failed (very - rare and for degenerate systems only) - * -3 either too many constraints (M or more), - degenerate constraints (some constraints are - repetead twice) or inconsistent constraints were - specified. - * 1 task is solved - C - decomposition coefficients, array[0..M-1] - Rep - fitting report. Following fields are set: - * R2 non-adjusted coefficient of determination - (non-weighted) - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED + State - structure which stores algorithm state -IMPORTANT: - this subroitine doesn't calculate task's condition number for K<>0. +NOTE: if EpsA,EpsB,EpsC and MaxIts are zero then these variables will +be setted as default values. -ERRORS IN PARAMETERS + -- ALGLIB -- + Copyright 30.11.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::linlsqrsetcond( + linlsqrstate state, + double epsa, + double epsb, + ae_int_t maxits, + const xparams _params = alglib::xdefault); -This solver also calculates different kinds of errors in parameters and -fills corresponding fields of report: -* Rep.CovPar covariance matrix for parameters, array[K,K]. -* Rep.ErrPar errors in parameters, array[K], - errpar = sqrt(diag(CovPar)) -* Rep.ErrCurve vector of fit errors - standard deviations of empirical - best-fit curve from "ideal" best-fit curve built with - infinite number of samples, array[N]. - errcurve = sqrt(diag(F*CovPar*F')), - where F is functions matrix. -* Rep.Noise vector of per-point estimates of noise, array[N] +
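For example (illustrative tolerances only; s is an assumed linlsqrstate): stop when ||A^T*Rk||/(||A||*||Rk||)<=1.0E-6 or ||Rk||<=1.0E-6*||B||, or after 100 iterations:

    linlsqrsetcond(s, 1.0E-6, 1.0E-6, 100);  // EpsA, EpsB, MaxIts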
    + +
    +
    /************************************************************************* +This function sets optional Tikhonov regularization coefficient. +It is zero by default. -IMPORTANT: errors in parameters are calculated without taking into - account boundary/linear constraints! Presence of constraints - changes distribution of errors, but there is no easy way to - account for constraints when you calculate covariance matrix. - -NOTE: noise in the data is estimated as follows: - * for fitting without user-supplied weights all points are - assumed to have same level of noise, which is estimated from - the data - * for fitting with user-supplied weights we assume that noise - level in I-th point is inversely proportional to Ith weight. - Coefficient of proportionality is estimated from the data. - -NOTE: we apply small amount of regularization when we invert squared - Jacobian and calculate covariance matrix. It guarantees that - algorithm won't divide by zero during inversion, but skews - error estimates a bit (fractional error is about 10^-9). - - However, we believe that this difference is insignificant for - all practical purposes except for the situation when you want - to compare ALGLIB results with "reference" implementation up - to the last significant digit. +INPUT PARAMETERS: + LambdaI - regularization factor, LambdaI>=0 -NOTE: covariance matrix is estimated using correction for degrees - of freedom (covariances are divided by N-M instead of dividing - by N). +OUTPUT PARAMETERS: + State - structure which stores algorithm state -- ALGLIB -- - Copyright 07.09.2009 by Bochkanov Sergey + Copyright 30.11.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lsfitlinearwc( - real_1d_array y, - real_1d_array w, - real_2d_array fmatrix, - real_2d_array cmatrix, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::lsfitlinearwc( - real_1d_array y, - real_1d_array w, - real_2d_array fmatrix, - real_2d_array cmatrix, - ae_int_t n, - ae_int_t m, - ae_int_t k, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::smp_lsfitlinearwc( - real_1d_array y, - real_1d_array w, - real_2d_array fmatrix, - real_2d_array cmatrix, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); -void alglib::smp_lsfitlinearwc( - real_1d_array y, - real_1d_array w, - real_2d_array fmatrix, - real_2d_array cmatrix, - ae_int_t n, - ae_int_t m, - ae_int_t k, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); +
    void alglib::linlsqrsetlambdai( + linlsqrstate state, + double lambdai, + const xparams _params = alglib::xdefault);
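Illustrative use (editor's sketch, reusing the assumed objects s, a, b, m, n from the sketch above): with non-zero LambdaI the solver works on the damped problem, commonly written as min |A*x-b|^2 + LambdaI^2*|x|^2:

    linlsqrcreate(m, n, s);
    linlsqrsetlambdai(s, 1.0E-4);  // assumed damping strength; 0 (the default) disables regularization
    linlsqrsolvesparse(s, a, b);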
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Nonlinear least squares fitting results. - -Called after return from LSFitFit(). +This function changes preconditioning settings of LinCGSolveSparse() +function. LinCGSolveSparse() will use diagonal of the system matrix as +preconditioner. This preconditioning mode is active by default. INPUT PARAMETERS: - State - algorithm state - -OUTPUT PARAMETERS: - Info - completion code: - * -7 gradient verification failed. - See LSFitSetGradientCheck() for more information. - * 1 relative function improvement is no more than - EpsF. - * 2 relative step is no more than EpsX. - * 4 gradient norm is no more than EpsG - * 5 MaxIts steps was taken - * 7 stopping conditions are too stringent, - further improvement is impossible - C - array[0..K-1], solution - Rep - optimization report. On success following fields are set: - * R2 non-adjusted coefficient of determination - (non-weighted) - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED - * WRMSError weighted rms error on the (X,Y). - -ERRORS IN PARAMETERS - -This solver also calculates different kinds of errors in parameters and -fills corresponding fields of report: -* Rep.CovPar covariance matrix for parameters, array[K,K]. -* Rep.ErrPar errors in parameters, array[K], - errpar = sqrt(diag(CovPar)) -* Rep.ErrCurve vector of fit errors - standard deviations of empirical - best-fit curve from "ideal" best-fit curve built with - infinite number of samples, array[N]. - errcurve = sqrt(diag(J*CovPar*J')), - where J is Jacobian matrix. -* Rep.Noise vector of per-point estimates of noise, array[N] - -IMPORTANT: errors in parameters are calculated without taking into - account boundary/linear constraints! Presence of constraints - changes distribution of errors, but there is no easy way to - account for constraints when you calculate covariance matrix. - -NOTE: noise in the data is estimated as follows: - * for fitting without user-supplied weights all points are - assumed to have same level of noise, which is estimated from - the data - * for fitting with user-supplied weights we assume that noise - level in I-th point is inversely proportional to Ith weight. - Coefficient of proportionality is estimated from the data. + State - structure which stores algorithm state -NOTE: we apply small amount of regularization when we invert squared - Jacobian and calculate covariance matrix. It guarantees that - algorithm won't divide by zero during inversion, but skews - error estimates a bit (fractional error is about 10^-9). + -- ALGLIB -- + Copyright 19.11.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::linlsqrsetprecdiag( + linlsqrstate state, + const xparams _params = alglib::xdefault); - However, we believe that this difference is insignificant for - all practical purposes except for the situation when you want - to compare ALGLIB results with "reference" implementation up - to the last significant digit. +
    + +
    +
/*************************************************************************
+This function changes preconditioning settings of LinLSQRSolveSparse()
+function. By default, SolveSparse() uses diagonal preconditioner, but if
+you want to use solver without preconditioning, you can call this function
+which forces solver to use unit matrix for preconditioning.
-NOTE: covariance matrix is estimated using correction for degrees
- of freedom (covariances are divided by N-M instead of dividing
- by N).
+INPUT PARAMETERS:
+ State - structure which stores algorithm state
 -- ALGLIB --
- Copyright 17.08.2009 by Bochkanov Sergey
+ Copyright 19.11.2012 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::lsfitresults( - lsfitstate state, - ae_int_t& info, - real_1d_array& c, - lsfitreport& rep); +
    void alglib::linlsqrsetprecunit( + linlsqrstate state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  
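Switching between the two preconditioning modes is a single call on the solver state (illustrative; s is an assumed linlsqrstate):

    linlsqrsetprecunit(s);  // turn preconditioning off (unit matrix)
    linlsqrsetprecdiag(s);  // restore the default diagonal preconditioning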

    - +
     
    /************************************************************************* -This function sets boundary constraints for underlying optimizer - -Boundary constraints are inactive by default (after initial creation). -They are preserved until explicitly turned off with another SetBC() call. +This function turns on/off reporting. INPUT PARAMETERS: - State - structure stores algorithm state - BndL - lower bounds, array[K]. - If some (all) variables are unbounded, you may specify - very small number or -INF (latter is recommended because - it will allow solver to use better algorithm). - BndU - upper bounds, array[K]. - If some (all) variables are unbounded, you may specify - very large number or +INF (latter is recommended because - it will allow solver to use better algorithm). - -NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th -variable will be "frozen" at X[i]=BndL[i]=BndU[i]. + State - structure which stores algorithm state + NeedXRep- whether iteration reports are needed or not -NOTE 2: unlike other constrained optimization algorithms, this solver has -following useful properties: -* bound constraints are always satisfied exactly -* function is evaluated only INSIDE area specified by bound constraints +If NeedXRep is True, algorithm will call rep() callback function if it is +provided to MinCGOptimize(). -- ALGLIB -- - Copyright 14.01.2011 by Bochkanov Sergey + Copyright 30.11.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lsfitsetbc( - lsfitstate state, - real_1d_array bndl, - real_1d_array bndu); +
    void alglib::linlsqrsetxrep( + linlsqrstate state, + bool needxrep, + const xparams _params = alglib::xdefault);
    - +
     
/*************************************************************************
-Stopping conditions for nonlinear least squares fitting.
+Procedure for solution of A*x=b with sparse A.
INPUT PARAMETERS:
- State - structure which stores algorithm state
- EpsF - stopping criterion. Algorithm stops if
- |F(k+1)-F(k)| <= EpsF*max{|F(k)|, |F(k+1)|, 1}
- EpsX - >=0
- The subroutine finishes its work if on k+1-th iteration
- the condition |v|<=EpsX is fulfilled, where:
- * |.| means Euclidian norm
- * v - scaled step vector, v[i]=dx[i]/s[i]
- * dx - ste pvector, dx=X(k+1)-X(k)
- * s - scaling coefficients set by LSFitSetScale()
- MaxIts - maximum number of iterations. If MaxIts=0, the number of
- iterations is unlimited. Only Levenberg-Marquardt
- iterations are counted (L-BFGS/CG iterations are NOT
- counted because their cost is very low compared to that of
- LM).
-
-NOTE
+ State - algorithm state
+ A - sparse M*N matrix in the CRS format (you MUST convert it
+ to CRS format by calling SparseConvertToCRS() function
+ BEFORE you pass it to this function).
+ B - right part, array[M]
-Passing EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic
-stopping criterion selection (according to the scheme used by MINLM unit).
+RESULT:
+ This function returns no result.
+ You can get solution by calling LinLSQRResults()
+NOTE: this function uses lightweight preconditioning - multiplication by
+ inverse of diag(A). If you want, you can turn preconditioning off by
+ calling LinLSQRSetPrecUnit(). However, preconditioning cost is low
+ and preconditioner is very important for solution of badly scaled
+ problems.
 -- ALGLIB --
- Copyright 17.08.2009 by Bochkanov Sergey
+ Copyright 30.11.2011 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::lsfitsetcond( - lsfitstate state, - double epsf, - double epsx, - ae_int_t maxits); +
    void alglib::linlsqrsolvesparse( + linlsqrstate state, + sparsematrix a, + real_1d_array b, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +

    Examples:   [1]  

    +
    -
    /************************************************************************* -This subroutine turns on verification of the user-supplied analytic -gradient: -* user calls this subroutine before fitting begins -* LSFitFit() is called -* prior to actual fitting, for each point in data set X_i and each - component of parameters being fited C_j algorithm performs following - steps: - * two trial steps are made to C_j-TestStep*S[j] and C_j+TestStep*S[j], - where C_j is j-th parameter and S[j] is a scale of j-th parameter - * if needed, steps are bounded with respect to constraints on C[] - * F(X_i|C) is evaluated at these trial points - * we perform one more evaluation in the middle point of the interval - * we build cubic model using function values and derivatives at trial - points and we compare its prediction with actual value in the middle - point - * in case difference between prediction and actual value is higher than - some predetermined threshold, algorithm stops with completion code -7; - Rep.VarIdx is set to index of the parameter with incorrect derivative. -* after verification is over, algorithm proceeds to the actual optimization. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "solvers.h" -NOTE 1: verification needs N*K (points count * parameters count) gradient - evaluations. It is very costly and you should use it only for low - dimensional problems, when you want to be sure that you've - correctly calculated analytic derivatives. You should not use it - in the production code (unless you want to check derivatives - provided by some third party). +using namespace alglib; -NOTE 2: you should carefully choose TestStep. Value which is too large - (so large that function behaviour is significantly non-cubic) will - lead to false alarms. You may use different step for different - parameters by means of setting scale with LSFitSetScale(). -NOTE 3: this function may lead to false positives. In case it reports that - I-th derivative was calculated incorrectly, you may decrease test - step and try one more time - maybe your function changes too - sharply and your step is too large for such rapidly chanding - function. +int main(int argc, char **argv) +{ + // + // This example illustrates solution of sparse linear least squares problem + // with LSQR algorithm. + // + // Suppose that we have least squares problem min|A*x-b| with sparse A + // represented by sparsematrix object + // [ 1 1 ] + // [ 1 1 ] + // A = [ 2 1 ] + // [ 1 ] + // [ 1 ] + // and right part b + // [ 4 ] + // [ 2 ] + // b = [ 4 ] + // [ 1 ] + // [ 2 ] + // and we want to solve this system in the least squares sense using + // LSQR algorithm. In order to do so, we have to create left part + // (sparsematrix object) and right part (dense array). + // + // Initially, sparse matrix is created in the Hash-Table format, + // which allows easy initialization, but do not allow matrix to be + // used in the linear solvers. So after construction you should convert + // sparse matrix to CRS format (one suited for linear operations). + // + sparsematrix a; + sparsecreate(5, 2, a); + sparseset(a, 0, 0, 1.0); + sparseset(a, 0, 1, 1.0); + sparseset(a, 1, 0, 1.0); + sparseset(a, 1, 1, 1.0); + sparseset(a, 2, 0, 2.0); + sparseset(a, 2, 1, 1.0); + sparseset(a, 3, 0, 1.0); + sparseset(a, 4, 1, 1.0); -NOTE 4: this function works only for optimizers created with LSFitCreateWFG() - or LSFitCreateFG() constructors. 
+ // + // Now our matrix is fully initialized, but we have to do one more + // step - convert it from Hash-Table format to CRS format (see + // documentation on sparse matrices for more information about these + // formats). + // + // If you omit this call, ALGLIB will generate exception on the first + // attempt to use A in linear operations. + // + sparseconverttocrs(a); -INPUT PARAMETERS: - State - structure used to store algorithm state - TestStep - verification step: - * TestStep=0 turns verification off - * TestStep>0 activates verification + // + // Initialization of the right part + // + real_1d_array b = "[4,2,4,1,2]"; - -- ALGLIB -- - Copyright 15.06.2012 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::lsfitsetgradientcheck(lsfitstate state, double teststep); + // + // Now we have to create linear solver object and to use it for the + // solution of the linear system. + // + linlsqrstate s; + linlsqrreport rep; + real_1d_array x; + linlsqrcreate(5, 2, s); + linlsqrsolvesparse(s, a, b); + linlsqrresults(s, x, rep); -
    - -
    -
    /************************************************************************* -This function sets scaling coefficients for underlying optimizer. + printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4 + printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [1.000,2.000] + return 0; +} -ALGLIB optimizers use scaling matrices to test stopping conditions (step -size and gradient are scaled before comparison with tolerances). Scale of -the I-th variable is a translation invariant measure of: -a) "how large" the variable is -b) how large the step should be to make significant changes in the function -Generally, scale is NOT considered to be a form of preconditioner. But LM -optimizer is unique in that it uses scaling matrix both in the stopping -condition tests and as Marquardt damping factor. +
    +
    + +linearmodel
    +lrreport
    + +lravgerror
    +lravgrelerror
    +lrbuild
    +lrbuilds
    +lrbuildz
    +lrbuildzs
    +lrpack
    +lrprocess
    +lrrmserror
    +lrunpack
    + + + +
    linreg_d_basic Linear regression used to build the very basic model and unpack coefficients
    + +
    +
    /************************************************************************* -Proper scaling is very important for the algorithm performance. It is less -important for the quality of results, but still has some influence (it is -easier to converge when variables are properly scaled, so premature -stopping is possible when very badly scalled variables are combined with -relaxed stopping conditions). +*************************************************************************/ +
    class linearmodel +{ +}; -INPUT PARAMETERS: - State - structure stores algorithm state - S - array[N], non-zero scaling coefficients - S[i] may be negative, sign doesn't matter. +
    + +
    +
    /************************************************************************* +LRReport structure contains additional information about linear model: +* C - covariation matrix, array[0..NVars,0..NVars]. + C[i,j] = Cov(A[i],A[j]) +* RMSError - root mean square error on a training set +* AvgError - average error on a training set +* AvgRelError - average relative error on a training set (excluding + observations with zero function value). +* CVRMSError - leave-one-out cross-validation estimate of + generalization error. Calculated using fast algorithm + with O(NVars*NPoints) complexity. +* CVAvgError - cross-validation estimate of average error +* CVAvgRelError - cross-validation estimate of average relative error - -- ALGLIB -- - Copyright 14.01.2011 by Bochkanov Sergey +All other fields of the structure are intended for internal use and should +not be used outside ALGLIB. *************************************************************************/ -
    void alglib::lsfitsetscale(lsfitstate state, real_1d_array s); +
    class lrreport +{ + real_2d_array c; + double rmserror; + double avgerror; + double avgrelerror; + double cvrmserror; + double cvavgerror; + double cvavgrelerror; + ae_int_t ncvdefects; + integer_1d_array cvdefects; +};
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function sets maximum step length +Average error on the test set INPUT PARAMETERS: - State - structure which stores algorithm state - StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't - want to limit step length. - -Use this subroutine when you optimize target function which contains exp() -or other fast growing functions, and optimization algorithm makes too -large steps which leads to overflow. This function allows us to reject -steps that are too large (and therefore expose us to the possible -overflow) without actually calculating function value at the x+stp*d. + LM - linear model + XY - test set + NPoints - test set size -NOTE: non-zero StpMax leads to moderate performance degradation because -intermediate step of preconditioned L-BFGS optimization is incompatible -with limits on step size. +RESULT: + average error. -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 30.08.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lsfitsetstpmax(lsfitstate state, double stpmax); +
    double alglib::lravgerror( + linearmodel lm, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
    - +
     
/*************************************************************************
-This function turns on/off reporting.
+Average relative error on the test set
INPUT PARAMETERS:
- State - structure which stores algorithm state
- NeedXRep- whether iteration reports are needed or not
-
-When reports are needed, State.C (current parameters) and State.F (current
-value of fitting function) are reported.
+ LM - linear model
+ XY - test set
+ NPoints - test set size
+RESULT:
+ average relative error.
 -- ALGLIB --
- Copyright 15.08.2010 by Bochkanov Sergey
+ Copyright 30.08.2008 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::lsfitsetxrep(lsfitstate state, bool needxrep); +
    double alglib::lravgrelerror( + linearmodel lm, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine fits piecewise linear curve to points with Ramer-Douglas- -Peucker algorithm, which stops after achieving desired precision. +Linear regression -IMPORTANT: -* it performs non-least-squares fitting; it builds curve, but this curve - does not minimize some least squares metric. See description of RDP - algorithm (say, in Wikipedia) for more details on WHAT is performed. -* this function does NOT work with parametric curves (i.e. curves which - can be represented as {X(t),Y(t)}. It works with curves which can be - represented as Y(X). Thus, it is impossible to model figures like circles - with this functions. - If you want to work with parametric curves, you should use - ParametricRDPFixed() function provided by "Parametric" subpackage of - "Interpolation" package. +Subroutine builds model: -INPUT PARAMETERS: - X - array of X-coordinates: - * at least N elements - * can be unordered (points are automatically sorted) - * this function may accept non-distinct X (see below for - more information on handling of such inputs) - Y - array of Y-coordinates: - * at least N elements - N - number of elements in X/Y - Eps - positive number, desired precision. + Y = A(0)*X[0] + ... + A(N-1)*X[N-1] + A(N) +and model found in ALGLIB format, covariation matrix, training set errors +(rms, average, average relative) and leave-one-out cross-validation +estimate of the generalization error. CV estimate calculated using fast +algorithm with O(NPoints*NVars) complexity. -OUTPUT PARAMETERS: - X2 - X-values of corner points for piecewise approximation, - has length NSections+1 or zero (for NSections=0). - Y2 - Y-values of corner points, - has length NSections+1 or zero (for NSections=0). - NSections- number of sections found by algorithm, - NSections can be zero for degenerate datasets - (N<=1 or all X[] are non-distinct). +When covariation matrix is calculated standard deviations of function +values are assumed to be equal to RMS error on the training set. + +INPUT PARAMETERS: + XY - training set, array [0..NPoints-1,0..NVars]: + * NVars columns - independent variables + * last column - dependent variable + NPoints - training set size, NPoints>NVars+1 + NVars - number of independent variables + +OUTPUT PARAMETERS: + Info - return code: + * -255, in case of unknown internal error + * -4, if internal SVD subroutine haven't converged + * -1, if incorrect parameters was passed (NPoints<NVars+2, NVars<1). + * 1, if subroutine successfully finished + LM - linear model in the ALGLIB format. Use subroutines of + this unit to work with the model. + AR - additional results -NOTE: X2/Y2 are ordered arrays, i.e. (X2[0],Y2[0]) is a first point of - curve, (X2[NSection-1],Y2[NSection-1]) is the last point. -- ALGLIB -- - Copyright 02.10.2014 by Bochkanov Sergey + Copyright 02.08.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lstfitpiecewiselinearrdp( - real_1d_array x, - real_1d_array y, - ae_int_t n, - double eps, - real_1d_array& x2, - real_1d_array& y2, - ae_int_t& nsections); +
    void alglib::lrbuild( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t& info, + linearmodel& lm, + lrreport& ar, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
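A small end-to-end sketch (editor's illustration with made-up data, not an upstream example): fit y = a0*x + a1 on five points, then unpack the coefficients and read the training-set error from the report:

    real_2d_array xy = "[[1,2.1],[2,3.9],[3,6.2],[4,7.8],[5,10.1]]";  // last column is y
    ae_int_t info, nvars;
    linearmodel lm;
    lrreport rep;
    real_1d_array c;
    lrbuild(xy, 5, 1, info, lm, rep);          // NPoints=5, NVars=1
    if( info==1 )
    {
        lrunpack(lm, c, nvars);                // c[0] = slope, c[1] = intercept
        printf("rms error on training set: %.3f\n", rep.rmserror);
    }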

    +
     
    /************************************************************************* -This subroutine fits piecewise linear curve to points with Ramer-Douglas- -Peucker algorithm, which stops after generating specified number of linear -sections. +Linear regression -IMPORTANT: -* it does NOT perform least-squares fitting; it builds curve, but this - curve does not minimize some least squares metric. See description of - RDP algorithm (say, in Wikipedia) for more details on WHAT is performed. -* this function does NOT work with parametric curves (i.e. curves which - can be represented as {X(t),Y(t)}. It works with curves which can be - represented as Y(X). Thus, it is impossible to model figures like - circles with this functions. - If you want to work with parametric curves, you should use - ParametricRDPFixed() function provided by "Parametric" subpackage of - "Interpolation" package. +Variant of LRBuild which uses vector of standatd deviations (errors in +function values). INPUT PARAMETERS: - X - array of X-coordinates: - * at least N elements - * can be unordered (points are automatically sorted) - * this function may accept non-distinct X (see below for - more information on handling of such inputs) - Y - array of Y-coordinates: - * at least N elements - N - number of elements in X/Y - M - desired number of sections: - * at most M sections are generated by this function - * less than M sections can be generated if we have N<M - (or some X are non-distinct). + XY - training set, array [0..NPoints-1,0..NVars]: + * NVars columns - independent variables + * last column - dependent variable + S - standard deviations (errors in function values) + array[0..NPoints-1], S[i]>0. + NPoints - training set size, NPoints>NVars+1 + NVars - number of independent variables OUTPUT PARAMETERS: - X2 - X-values of corner points for piecewise approximation, - has length NSections+1 or zero (for NSections=0). - Y2 - Y-values of corner points, - has length NSections+1 or zero (for NSections=0). - NSections- number of sections found by algorithm, NSections<=M, - NSections can be zero for degenerate datasets - (N<=1 or all X[] are non-distinct). + Info - return code: + * -255, in case of unknown internal error + * -4, if internal SVD subroutine haven't converged + * -1, if incorrect parameters was passed (NPoints<NVars+2, NVars<1). + * -2, if S[I]<=0 + * 1, if subroutine successfully finished + LM - linear model in the ALGLIB format. Use subroutines of + this unit to work with the model. + AR - additional results -NOTE: X2/Y2 are ordered arrays, i.e. (X2[0],Y2[0]) is a first point of - curve, (X2[NSection-1],Y2[NSection-1]) is the last point. -- ALGLIB -- - Copyright 02.10.2014 by Bochkanov Sergey + Copyright 02.08.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::lstfitpiecewiselinearrdpfixed( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t m, - real_1d_array& x2, - real_1d_array& y2, - ae_int_t& nsections); +
    void alglib::lrbuilds( + real_2d_array xy, + real_1d_array s, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t& info, + linearmodel& lm, + lrreport& ar, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Fitting by polynomials in barycentric form. This function provides simple -unterface for unconstrained unweighted fitting. See PolynomialFitWC() if -you need constrained fitting. - -Task is linear, so linear least squares solver is used. Complexity of this -computational scheme is O(N*M^2), mostly dominated by least squares solver +Like LRBuild but builds model -SEE ALSO: - PolynomialFitWC() + Y = A(0)*X[0] + ... + A(N-1)*X[N-1] -COMMERCIAL EDITION OF ALGLIB: +i.e. with zero constant term. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. + -- ALGLIB -- + Copyright 30.10.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::lrbuildz( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t& info, + linearmodel& lm, + lrreport& ar, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS: - X - points, array[0..N-1]. - Y - function values, array[0..N-1]. - N - number of points, N>0 - * if given, only leading N elements of X/Y are used - * if not given, automatically determined from sizes of X/Y - M - number of basis functions (= polynomial_degree + 1), M>=1 +
    + +
    +
    /************************************************************************* +Like LRBuildS, but builds model -OUTPUT PARAMETERS: - Info- same format as in LSFitLinearW() subroutine: - * Info>0 task is solved - * Info<=0 an error occured: - -4 means inconvergence of internal SVD - P - interpolant in barycentric form. - Rep - report, same format as in LSFitLinearW() subroutine. - Following fields are set: - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED + Y = A(0)*X[0] + ... + A(N-1)*X[N-1] -NOTES: - you can convert P from barycentric form to the power or Chebyshev - basis with PolynomialBar2Pow() or PolynomialBar2Cheb() functions from - POLINT subpackage. +i.e. with zero constant term. - -- ALGLIB PROJECT -- - Copyright 10.12.2009 by Bochkanov Sergey + -- ALGLIB -- + Copyright 30.10.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::polynomialfit( - real_1d_array x, - real_1d_array y, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& p, - polynomialfitreport& rep); -void alglib::polynomialfit( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& p, - polynomialfitreport& rep); -void alglib::smp_polynomialfit( - real_1d_array x, - real_1d_array y, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& p, - polynomialfitreport& rep); -void alglib::smp_polynomialfit( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t m, +
    void alglib::lrbuildzs( + real_2d_array xy, + real_1d_array s, + ae_int_t npoints, + ae_int_t nvars, ae_int_t& info, - barycentricinterpolant& p, - polynomialfitreport& rep); + linearmodel& lm, + lrreport& ar, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
/*************************************************************************
-Weighted fitting by polynomials in barycentric form, with constraints on
-function values or first derivatives.
-
-Small regularizing term is used when solving constrained tasks (to improve
-stability).
+"Packs" coefficients and creates linear model in ALGLIB format (LRUnpack
+reversed).
-Task is linear, so linear least squares solver is used. Complexity of this
-computational scheme is O(N*M^2), mostly dominated by least squares solver
+INPUT PARAMETERS:
+ V - coefficients, array[0..NVars]
+ NVars - number of independent variables
-SEE ALSO:
- PolynomialFit()
+OUTPUT PARAMETERS:
+ LM - linear model.
-COMMERCIAL EDITION OF ALGLIB:
+ -- ALGLIB --
+ Copyright 30.08.2008 by Bochkanov Sergey
+*************************************************************************/
    void alglib::lrpack( + real_1d_array v, + ae_int_t nvars, + linearmodel& lm, + const xparams _params = alglib::xdefault); - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +
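For instance (illustrative coefficients, not from the manual), a model y = 2*x0 - x1 + 3 can be assembled directly without any training set:

    real_1d_array v = "[2.0,-1.0,3.0]";  // coefficients for x0 and x1, constant term last
    linearmodel lm;
    lrpack(v, 2, lm);                    // NVars=2; lm can now be used with lrprocess()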
    + +
    +
    /************************************************************************* +Procesing INPUT PARAMETERS: - X - points, array[0..N-1]. - Y - function values, array[0..N-1]. - W - weights, array[0..N-1] - Each summand in square sum of approximation deviations from - given values is multiplied by the square of corresponding - weight. Fill it by 1's if you don't want to solve weighted - task. - N - number of points, N>0. - * if given, only leading N elements of X/Y/W are used - * if not given, automatically determined from sizes of X/Y/W - XC - points where polynomial values/derivatives are constrained, - array[0..K-1]. - YC - values of constraints, array[0..K-1] - DC - array[0..K-1], types of constraints: - * DC[i]=0 means that P(XC[i])=YC[i] - * DC[i]=1 means that P'(XC[i])=YC[i] - SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS - K - number of constraints, 0<=K<M. - K=0 means no constraints (XC/YC/DC are not used in such cases) - M - number of basis functions (= polynomial_degree + 1), M>=1 - -OUTPUT PARAMETERS: - Info- same format as in LSFitLinearW() subroutine: - * Info>0 task is solved - * Info<=0 an error occured: - -4 means inconvergence of internal SVD - -3 means inconsistent constraints - P - interpolant in barycentric form. - Rep - report, same format as in LSFitLinearW() subroutine. - Following fields are set: - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED + LM - linear model + X - input vector, array[0..NVars-1]. -IMPORTANT: - this subroitine doesn't calculate task's condition number for K<>0. +Result: + value of linear model regression estimate -NOTES: - you can convert P from barycentric form to the power or Chebyshev - basis with PolynomialBar2Pow() or PolynomialBar2Cheb() functions from - POLINT subpackage. + -- ALGLIB -- + Copyright 03.09.2008 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::lrprocess( + linearmodel lm, + real_1d_array x, + const xparams _params = alglib::xdefault); -SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: +
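Continuing the illustrative lrpack() sketch above, evaluating the model at a point is a single call:

    real_1d_array x = "[1.0,2.0]";
    double y = lrprocess(lm, x);  // 2*1.0 - 1.0*2.0 + 3.0 = 3.0 for the assumed model
    printf("%.2f\n", y);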
    + +
    +
    /************************************************************************* +RMS error on the test set -Setting constraints can lead to undesired results, like ill-conditioned -behavior, or inconsistency being detected. From the other side, it allows -us to improve quality of the fit. Here we summarize our experience with -constrained regression splines: -* even simple constraints can be inconsistent, see Wikipedia article on - this subject: http://en.wikipedia.org/wiki/Birkhoff_interpolation -* the greater is M (given fixed constraints), the more chances that - constraints will be consistent -* in the general case, consistency of constraints is NOT GUARANTEED. -* in the one special cases, however, we can guarantee consistency. This - case is: M>1 and constraints on the function values (NOT DERIVATIVES) +INPUT PARAMETERS: + LM - linear model + XY - test set + NPoints - test set size -Our final recommendation is to use constraints WHEN AND ONLY when you -can't solve your task without them. Anything beyond special cases given -above is not guaranteed and may result in inconsistency. +RESULT: + root mean square error. - -- ALGLIB PROJECT -- - Copyright 10.12.2009 by Bochkanov Sergey + -- ALGLIB -- + Copyright 30.08.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::polynomialfitwc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& p, - polynomialfitreport& rep); -void alglib::polynomialfitwc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t n, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t k, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& p, - polynomialfitreport& rep); -void alglib::smp_polynomialfitwc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& p, - polynomialfitreport& rep); -void alglib::smp_polynomialfitwc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t n, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t k, - ae_int_t m, - ae_int_t& info, - barycentricinterpolant& p, - polynomialfitreport& rep); +
    double alglib::lrrmserror( + linearmodel lm, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
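The error metrics take an independent test set in the same layout as the training set (last column holds the observed values). A hedged example with made-up hold-out data and an assumed fitted linearmodel lm:

    real_2d_array xytest = "[[6,12.2],[7,13.8]]";
    printf("rms: %.3f  avg: %.3f\n",
           lrrmserror(lm, xytest, 2),
           lravgerror(lm, xytest, 2));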
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Least squares fitting by cubic spline. - -This subroutine is "lightweight" alternative for more complex and feature- -rich Spline1DFitCubicWC(). See Spline1DFitCubicWC() for more information -about subroutine parameters (we don't duplicate it here because of length) +Unpacks coefficients of linear model. -COMMERCIAL EDITION OF ALGLIB: +INPUT PARAMETERS: + LM - linear model in ALGLIB format - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +OUTPUT PARAMETERS: + V - coefficients, array[0..NVars] + constant term (intercept) is stored in the V[NVars]. + NVars - number of independent variables (one less than number + of coefficients) - -- ALGLIB PROJECT -- - Copyright 18.08.2009 by Bochkanov Sergey + -- ALGLIB -- + Copyright 30.08.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dfitcubic( - real_1d_array x, - real_1d_array y, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::spline1dfitcubic( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfitcubic( - real_1d_array x, - real_1d_array y, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfitcubic( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); +
    void alglib::lrunpack( + linearmodel lm, + real_1d_array& v, + ae_int_t& nvars, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
    -
    /************************************************************************* -Weighted fitting by cubic spline, with constraints on function values or -derivatives. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "dataanalysis.h" -Equidistant grid with M-2 nodes on [min(x,xc),max(x,xc)] is used to build -basis functions. Basis functions are cubic splines with continuous second -derivatives and non-fixed first derivatives at interval ends. Small -regularizing term is used when solving constrained tasks (to improve -stability). +using namespace alglib; -Task is linear, so linear least squares solver is used. Complexity of this -computational scheme is O(N*M^2), mostly dominated by least squares solver -SEE ALSO - Spline1DFitHermiteWC() - fitting by Hermite splines (more flexible, - less smooth) - Spline1DFitCubic() - "lightweight" fitting by cubic splines, - without invididual weights and constraints +int main(int argc, char **argv) +{ + // + // In this example we demonstrate linear fitting by f(x|a) = a*exp(0.5*x). + // + // We have: + // * xy - matrix of basic function values (exp(0.5*x)) and expected values + // + real_2d_array xy = "[[0.606531,1.133719],[0.670320,1.306522],[0.740818,1.504604],[0.818731,1.554663],[0.904837,1.884638],[1.000000,2.072436],[1.105171,2.257285],[1.221403,2.534068],[1.349859,2.622017],[1.491825,2.897713],[1.648721,3.219371]]"; + ae_int_t info; + ae_int_t nvars; + linearmodel model; + lrreport rep; + real_1d_array c; -COMMERCIAL EDITION OF ALGLIB: + lrbuildz(xy, 11, 1, info, model, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + lrunpack(model, c, nvars); + printf("%s\n", c.tostring(4).c_str()); // EXPECTED: [1.98650,0.00000] + return 0; +} - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. -INPUT PARAMETERS: - X - points, array[0..N-1]. - Y - function values, array[0..N-1]. - W - weights, array[0..N-1] - Each summand in square sum of approximation deviations from - given values is multiplied by the square of corresponding - weight. Fill it by 1's if you don't want to solve weighted - task. - N - number of points (optional): - * N>0 - * if given, only first N elements of X/Y/W are processed - * if not given, automatically determined from X/Y/W sizes - XC - points where spline values/derivatives are constrained, - array[0..K-1]. 
- YC - values of constraints, array[0..K-1] - DC - array[0..K-1], types of constraints: - * DC[i]=0 means that S(XC[i])=YC[i] - * DC[i]=1 means that S'(XC[i])=YC[i] - SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS - K - number of constraints (optional): - * 0<=K<M. - * K=0 means no constraints (XC/YC/DC are not used) - * if given, only first K elements of XC/YC/DC are used - * if not given, automatically determined from XC/YC/DC - M - number of basis functions ( = number_of_nodes+2), M>=4. +
    + + +
    +
    /************************************************************************* -OUTPUT PARAMETERS: - Info- same format as in LSFitLinearWC() subroutine. - * Info>0 task is solved - * Info<=0 an error occured: - -4 means inconvergence of internal SVD - -3 means inconsistent constraints - S - spline interpolant. - Rep - report, same format as in LSFitLinearWC() subroutine. - Following fields are set: - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED +*************************************************************************/ +
    class logitmodel +{ +}; -IMPORTANT: - this subroitine doesn't calculate task's condition number for K<>0. +
    + +
    +
    /************************************************************************* +MNLReport structure contains information about training process: +* NGrad - number of gradient calculations +* NHess - number of Hessian calculations +*************************************************************************/ +
    class mnlreport +{ + ae_int_t ngrad; + ae_int_t nhess; +}; +
    + +
    +
    /************************************************************************* +Average cross-entropy (in bits per element) on the test set -ORDER OF POINTS +INPUT PARAMETERS: + LM - logit model + XY - test set + NPoints - test set size -Subroutine automatically sorts points, so caller may pass unsorted array. +RESULT: + CrossEntropy/(NPoints*ln(2)). -SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: + -- ALGLIB -- + Copyright 10.09.2008 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mnlavgce( + logitmodel lm, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); -Setting constraints can lead to undesired results, like ill-conditioned -behavior, or inconsistency being detected. From the other side, it allows -us to improve quality of the fit. Here we summarize our experience with -constrained regression splines: -* excessive constraints can be inconsistent. Splines are piecewise cubic - functions, and it is easy to create an example, where large number of - constraints concentrated in small area will result in inconsistency. - Just because spline is not flexible enough to satisfy all of them. And - same constraints spread across the [min(x),max(x)] will be perfectly - consistent. -* the more evenly constraints are spread across [min(x),max(x)], the more - chances that they will be consistent -* the greater is M (given fixed constraints), the more chances that - constraints will be consistent -* in the general case, consistency of constraints IS NOT GUARANTEED. -* in the several special cases, however, we CAN guarantee consistency. -* one of this cases is constraints on the function values AND/OR its - derivatives at the interval boundaries. -* another special case is ONE constraint on the function value (OR, but - not AND, derivative) anywhere in the interval +
    + +
    +
    /************************************************************************* +Average error on the test set -Our final recommendation is to use constraints WHEN AND ONLY WHEN you -can't solve your task without them. Anything beyond special cases given -above is not guaranteed and may result in inconsistency. +INPUT PARAMETERS: + LM - logit model + XY - test set + NPoints - test set size +RESULT: + average error (error when estimating posterior probabilities). - -- ALGLIB PROJECT -- - Copyright 18.08.2009 by Bochkanov Sergey + -- ALGLIB -- + Copyright 30.08.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dfitcubicwc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::spline1dfitcubicwc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t n, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t k, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfitcubicwc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfitcubicwc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t n, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t k, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); +
    double alglib::mnlavgerror( + logitmodel lm, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Least squares fitting by Hermite spline. - -This subroutine is "lightweight" alternative for more complex and feature- -rich Spline1DFitHermiteWC(). See Spline1DFitHermiteWC() description for -more information about subroutine parameters (we don't duplicate it here -because of length). +Average relative error on the test set -COMMERCIAL EDITION OF ALGLIB: +INPUT PARAMETERS: + LM - logit model + XY - test set + NPoints - test set size - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +RESULT: + average relative error (error when estimating posterior probabilities). - -- ALGLIB PROJECT -- - Copyright 18.08.2009 by Bochkanov Sergey + -- ALGLIB -- + Copyright 30.08.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dfithermite( - real_1d_array x, - real_1d_array y, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::spline1dfithermite( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfithermite( - real_1d_array x, - real_1d_array y, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfithermite( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); +
    double alglib::mnlavgrelerror( + logitmodel lm, + real_2d_array xy, + ae_int_t ssize, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Weighted fitting by Hermite spline, with constraints on function values -or first derivatives. - -Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to build -basis functions. Basis functions are Hermite splines. Small regularizing -term is used when solving constrained tasks (to improve stability). - -Task is linear, so linear least squares solver is used. Complexity of this -computational scheme is O(N*M^2), mostly dominated by least squares solver - -SEE ALSO - Spline1DFitCubicWC() - fitting by Cubic splines (less flexible, - more smooth) - Spline1DFitHermite() - "lightweight" Hermite fitting, without - invididual weights and constraints +Classification error on test set = MNLRelClsError*NPoints -COMMERCIAL EDITION OF ALGLIB: + -- ALGLIB -- + Copyright 10.09.2008 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::mnlclserror( + logitmodel lm, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +
    + +
    +
    /************************************************************************* +"Packs" coefficients and creates logit model in ALGLIB format (MNLUnpack +reversed). INPUT PARAMETERS: - X - points, array[0..N-1]. - Y - function values, array[0..N-1]. - W - weights, array[0..N-1] - Each summand in square sum of approximation deviations from - given values is multiplied by the square of corresponding - weight. Fill it by 1's if you don't want to solve weighted - task. - N - number of points (optional): - * N>0 - * if given, only first N elements of X/Y/W are processed - * if not given, automatically determined from X/Y/W sizes - XC - points where spline values/derivatives are constrained, - array[0..K-1]. - YC - values of constraints, array[0..K-1] - DC - array[0..K-1], types of constraints: - * DC[i]=0 means that S(XC[i])=YC[i] - * DC[i]=1 means that S'(XC[i])=YC[i] - SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS - K - number of constraints (optional): - * 0<=K<M. - * K=0 means no constraints (XC/YC/DC are not used) - * if given, only first K elements of XC/YC/DC are used - * if not given, automatically determined from XC/YC/DC - M - number of basis functions (= 2 * number of nodes), - M>=4, - M IS EVEN! + A - model (see MNLUnpack) + NVars - number of independent variables + NClasses - number of classes OUTPUT PARAMETERS: - Info- same format as in LSFitLinearW() subroutine: - * Info>0 task is solved - * Info<=0 an error occured: - -4 means inconvergence of internal SVD - -3 means inconsistent constraints - -2 means odd M was passed (which is not supported) - -1 means another errors in parameters passed - (N<=0, for example) - S - spline interpolant. - Rep - report, same format as in LSFitLinearW() subroutine. - Following fields are set: - * RMSError rms error on the (X,Y). - * AvgError average error on the (X,Y). - * AvgRelError average relative error on the non-zero Y - * MaxError maximum error - NON-WEIGHTED ERRORS ARE CALCULATED - -IMPORTANT: - this subroitine doesn't calculate task's condition number for K<>0. + LM - logit model. -IMPORTANT: - this subroitine supports only even M's + -- ALGLIB -- + Copyright 10.09.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mnlpack( + real_2d_array a, + ae_int_t nvars, + ae_int_t nclasses, + logitmodel& lm, + const xparams _params = alglib::xdefault); +
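For illustration only, a hedged sketch with invented names: building a two-class logit model directly from a coefficient matrix. Per the documentation above, A is array[0..NClasses-2,0..NVars], so one row with NVars=1 holds the coefficient for X[0] followed by the intercept.

    // P(class=0) = exp(2*x)/(exp(2*x)+1), P(class=1) = 1 - P(class=0)
    real_2d_array a = "[[2.0,0.0]]";
    logitmodel packedlm;            // hypothetical variable, not from the manual
    mnlpack(a, 1, 2, packedlm);     // NVars = 1, NClasses = 2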
    + +
    +
    /************************************************************************* +Procesing -ORDER OF POINTS +INPUT PARAMETERS: + LM - logit model, passed by non-constant reference + (some fields of structure are used as temporaries + when calculating model output). + X - input vector, array[0..NVars-1]. + Y - (possibly) preallocated buffer; if size of Y is less than + NClasses, it will be reallocated.If it is large enough, it + is NOT reallocated, so we can save some time on reallocation. -Subroutine automatically sorts points, so caller may pass unsorted array. +OUTPUT PARAMETERS: + Y - result, array[0..NClasses-1] + Vector of posterior probabilities for classification task. -SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: + -- ALGLIB -- + Copyright 10.09.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mnlprocess( + logitmodel lm, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault); -Setting constraints can lead to undesired results, like ill-conditioned -behavior, or inconsistency being detected. From the other side, it allows -us to improve quality of the fit. Here we summarize our experience with -constrained regression splines: -* excessive constraints can be inconsistent. Splines are piecewise cubic - functions, and it is easy to create an example, where large number of - constraints concentrated in small area will result in inconsistency. - Just because spline is not flexible enough to satisfy all of them. And - same constraints spread across the [min(x),max(x)] will be perfectly - consistent. -* the more evenly constraints are spread across [min(x),max(x)], the more - chances that they will be consistent -* the greater is M (given fixed constraints), the more chances that - constraints will be consistent -* in the general case, consistency of constraints is NOT GUARANTEED. -* in the several special cases, however, we can guarantee consistency. -* one of this cases is M>=4 and constraints on the function value - (AND/OR its derivative) at the interval boundaries. -* another special case is M>=4 and ONE constraint on the function value - (OR, BUT NOT AND, derivative) anywhere in [min(x),max(x)] +
    + +
    +
    /************************************************************************* +'interactive' variant of MNLProcess for languages like Python which +support constructs like "Y = MNLProcess(LM,X)" and interactive mode of the +interpreter -Our final recommendation is to use constraints WHEN AND ONLY when you -can't solve your task without them. Anything beyond special cases given -above is not guaranteed and may result in inconsistency. +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. - -- ALGLIB PROJECT -- - Copyright 18.08.2009 by Bochkanov Sergey + -- ALGLIB -- + Copyright 10.09.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dfithermitewc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::spline1dfithermitewc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t n, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t k, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfithermitewc( - real_1d_array x, - real_1d_array y, - real_1d_array w, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfithermitewc( +
    void alglib::mnlprocessi( + logitmodel lm, real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t n, - real_1d_array xc, - real_1d_array yc, - integer_1d_array dc, - ae_int_t k, - ae_int_t m, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Fitting by penalized cubic spline. +Relative classification error on the test set -Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to build -basis functions. Basis functions are cubic splines with natural boundary -conditions. Problem is regularized by adding non-linearity penalty to the -usual least squares penalty function: - - S(x) = arg min { LS + P }, where - LS = SUM { w[i]^2*(y[i] - S(x[i]))^2 } - least squares penalty - P = C*10^rho*integral{ S''(x)^2*dx } - non-linearity penalty - rho - tunable constant given by user - C - automatically determined scale parameter, - makes penalty invariant with respect to scaling of X, Y, W. +INPUT PARAMETERS: + LM - logit model + XY - test set + NPoints - test set size -COMMERCIAL EDITION OF ALGLIB: +RESULT: + percent of incorrectly classified cases. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. + -- ALGLIB -- + Copyright 10.09.2008 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mnlrelclserror( + logitmodel lm, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS: - X - points, array[0..N-1]. - Y - function values, array[0..N-1]. - N - number of points (optional): - * N>0 - * if given, only first N elements of X/Y are processed - * if not given, automatically determined from X/Y sizes - M - number of basis functions ( = number_of_nodes), M>=4. - Rho - regularization constant passed by user. It penalizes - nonlinearity in the regression spline. It is logarithmically - scaled, i.e. actual value of regularization constant is - calculated as 10^Rho. It is automatically scaled so that: - * Rho=2.0 corresponds to moderate amount of nonlinearity - * generally, it should be somewhere in the [-8.0,+8.0] - If you do not want to penalize nonlineary, - pass small Rho. Values as low as -15 should work. +
    + +
    +
    /************************************************************************* +RMS error on the test set -OUTPUT PARAMETERS: +INPUT PARAMETERS: + LM - logit model + XY - test set + NPoints - test set size + +RESULT: + root mean square error (error when estimating posterior probabilities). + + -- ALGLIB -- + Copyright 30.08.2008 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mnlrmserror( + logitmodel lm, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
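A hedged fragment tying the error metrics above together; lm is a trained logitmodel, and xytest/ntest are a hypothetical test set and its size, laid out like the training data (class label in the last column).

    double avgce = mnlavgce(lm, xytest, ntest);        // average cross-entropy, bits per element
    double rel   = mnlrelclserror(lm, xytest, ntest);  // relative classification error
    ae_int_t bad = mnlclserror(lm, xytest, ntest);     // number of misclassified cases
    double rms   = mnlrmserror(lm, xytest, ntest);     // RMS error of posterior probabilities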
    + +
    +
    /************************************************************************* +This subroutine trains logit model. + +INPUT PARAMETERS: + XY - training set, array[0..NPoints-1,0..NVars] + First NVars columns store values of independent + variables, next column stores number of class (from 0 + to NClasses-1) which dataset element belongs to. Fractional + values are rounded to nearest integer. + NPoints - training set size, NPoints>=1 + NVars - number of independent variables, NVars>=1 + NClasses - number of classes, NClasses>=2 + +OUTPUT PARAMETERS: + Info - return code: + * -2, if there is a point with class number + outside of [0..NClasses-1]. + * -1, if incorrect parameters was passed + (NPoints<NVars+2, NVars<1, NClasses<2). + * 1, if task has been solved + LM - model built + Rep - training report + + -- ALGLIB -- + Copyright 10.09.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mnltrainh( + real_2d_array xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t nclasses, + ae_int_t& info, + logitmodel& lm, + mnlreport& rep, + const xparams _params = alglib::xdefault); + +
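A minimal end-to-end sketch with data invented for illustration (not an official sample): training a two-class logit model with mnltrainh() and querying posterior probabilities with mnlprocess(); headers follow the lrbuildz() sample shown earlier.

#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // One independent variable; the last column is the class label (0 or 1)
    real_2d_array xy = "[[-1.0,0],[-0.8,0],[-0.6,0],[-0.4,0],[0.4,1],[0.6,1],[0.8,1],[1.0,1]]";
    ae_int_t info;
    logitmodel lm;
    mnlreport rep;

    // NPoints=8, NVars=1, NClasses=2; Info=1 means the task was solved
    mnltrainh(xy, 8, 1, 2, info, lm, rep);
    printf("%d\n", int(info));

    // Posterior probabilities for a new point; Y receives NClasses elements
    real_1d_array x = "[0.9]";
    real_1d_array y;
    mnlprocess(lm, x, y);
    printf("%s\n", y.tostring(2).c_str());
    return 0;
}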
    + +
    +
    /************************************************************************* +Unpacks coefficients of logit model. Logit model have form: + + P(class=i) = S(i) / (S(0) + S(1) + ... +S(M-1)) + S(i) = Exp(A[i,0]*X[0] + ... + A[i,N-1]*X[N-1] + A[i,N]), when i<M-1 + S(M-1) = 1 + +INPUT PARAMETERS: + LM - logit model in ALGLIB format + +OUTPUT PARAMETERS: + V - coefficients, array[0..NClasses-2,0..NVars] + NVars - number of independent variables + NClasses - number of classes + + -- ALGLIB -- + Copyright 10.09.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mnlunpack( + logitmodel lm, + real_2d_array& a, + ae_int_t& nvars, + ae_int_t& nclasses, + const xparams _params = alglib::xdefault); + +
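Continuing the training sketch above (hedged, not from the manual): mnlunpack() exposes the fitted coefficient matrix, and mnlpack() can later rebuild a model from it.

    real_2d_array a;
    ae_int_t nvars, nclasses;
    mnlunpack(lm, a, nvars, nclasses);      // a is array[0..NClasses-2, 0..NVars]
    printf("%s\n", a.tostring(3).c_str());  // row 0: coefficient for X[0], then the intercept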
    + +
    + +barycentricfitreport
    +lsfitreport
    +lsfitstate
    +polynomialfitreport
    + +barycentricfitfloaterhormann
    +barycentricfitfloaterhormannwc
    +logisticcalc4
    +logisticcalc5
    +logisticfit4
    +logisticfit45x
    +logisticfit4ec
    +logisticfit5
    +logisticfit5ec
    +lsfitcreatef
    +lsfitcreatefg
    +lsfitcreatefgh
    +lsfitcreatewf
    +lsfitcreatewfg
    +lsfitcreatewfgh
    +lsfitfit
    +lsfitlinear
    +lsfitlinearc
    +lsfitlinearw
    +lsfitlinearwc
    +lsfitresults
    +lsfitsetbc
    +lsfitsetcond
    +lsfitsetgradientcheck
    +lsfitsetlc
    +lsfitsetscale
    +lsfitsetstpmax
    +lsfitsetxrep
    +lstfitpiecewiselinearrdp
    +lstfitpiecewiselinearrdpfixed
    +polynomialfit
    +polynomialfitwc
    +spline1dfitcubic
    +spline1dfitcubicwc
    +spline1dfithermite
    +spline1dfithermitewc
    + + + + + + + + + + + + + + +
    lsfit_d_lin Unconstrained (general) linear least squares fitting with and without weights
    lsfit_d_linc Constrained (general) linear least squares fitting with and without weights
    lsfit_d_nlf Nonlinear fitting using function value only
lsfit_d_nlfb Bound constrained nonlinear fitting using function value only
    lsfit_d_nlfg Nonlinear fitting using gradient
    lsfit_d_nlfgh Nonlinear fitting using gradient and Hessian
    lsfit_d_nlscale Nonlinear fitting with custom scaling and bound constraints
    lsfit_d_pol Unconstrained polynomial fitting
    lsfit_d_polc Constrained polynomial fitting
    lsfit_d_spline Unconstrained fitting by penalized regression spline
    lsfit_t_4pl 4-parameter logistic fitting
    lsfit_t_5pl 5-parameter logistic fitting
    + +
    +
    /************************************************************************* +Barycentric fitting report: + RMSError RMS error + AvgError average error + AvgRelError average relative error (for non-zero Y[I]) + MaxError maximum error + TaskRCond reciprocal of task's condition number +*************************************************************************/ +
    class barycentricfitreport +{ + double taskrcond; + ae_int_t dbest; + double rmserror; + double avgerror; + double avgrelerror; + double maxerror; +}; + +
    + +
    +
    /************************************************************************* +Least squares fitting report. This structure contains informational fields +which are set by fitting functions provided by this unit. + +Different functions initialize different sets of fields, so you should +read documentation on specific function you used in order to know which +fields are initialized. + + TaskRCond reciprocal of task's condition number + IterationsCount number of internal iterations + + VarIdx if user-supplied gradient contains errors which were + detected by nonlinear fitter, this field is set to + index of the first component of gradient which is + suspected to be spoiled by bugs. + + RMSError RMS error + AvgError average error + AvgRelError average relative error (for non-zero Y[I]) + MaxError maximum error + + WRMSError weighted RMS error + + CovPar covariance matrix for parameters, filled by some solvers + ErrPar vector of errors in parameters, filled by some solvers + ErrCurve vector of fit errors - variability of the best-fit + curve, filled by some solvers. + Noise vector of per-point noise estimates, filled by + some solvers. + R2 coefficient of determination (non-weighted, non-adjusted), + filled by some solvers. +*************************************************************************/ +
    class lsfitreport +{ + double taskrcond; + ae_int_t iterationscount; + ae_int_t varidx; + double rmserror; + double avgerror; + double avgrelerror; + double maxerror; + double wrmserror; + real_2d_array covpar; + real_1d_array errpar; + real_1d_array errcurve; + real_1d_array noise; + double r2; +}; + +
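As a hedged illustration of how these fields are typically consumed (which fields are valid depends on the particular driver, as noted above), rep here stands for an lsfitreport filled by one of the lsfit* routines.

    printf("rms=%0.6f avg=%0.6f max=%0.6f\n",
           double(rep.rmserror), double(rep.avgerror), double(rep.maxerror));
    // R2, CovPar and the other solver-specific fields are only filled by some
    // drivers - check the documentation of the routine you called before reading them.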
    + +
    +
    /************************************************************************* +Nonlinear fitter. + +You should use ALGLIB functions to work with fitter. +Never try to access its fields directly! +*************************************************************************/ +
    class lsfitstate +{ +}; + +
    + +
    +
    /************************************************************************* +Polynomial fitting report: + TaskRCond reciprocal of task's condition number + RMSError RMS error + AvgError average error + AvgRelError average relative error (for non-zero Y[I]) + MaxError maximum error +*************************************************************************/ +
    class polynomialfitreport +{ + double taskrcond; + double rmserror; + double avgerror; + double avgrelerror; + double maxerror; +}; + +
    + +
    +
    /************************************************************************* +Rational least squares fitting using Floater-Hormann rational functions +with optimal D chosen from [0,9]. + +Equidistant grid with M node on [min(x),max(x)] is used to build basis +functions. Different values of D are tried, optimal D (least root mean +square error) is chosen. Task is linear, so linear least squares solver +is used. Complexity of this computational scheme is O(N*M^2) (mostly +dominated by the least squares solver). + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + X - points, array[0..N-1]. + Y - function values, array[0..N-1]. + N - number of points, N>0. + M - number of basis functions ( = number_of_nodes), M>=2. + +OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occured: - -4 means inconvergence of internal SVD or - Cholesky decomposition; problem may be - too ill-conditioned (very rare) - S - spline interpolant. - Rep - Following fields are set: + -4 means inconvergence of internal SVD + -3 means inconsistent constraints + B - barycentric interpolant. + Rep - report, same format as in LSFitLinearWC() subroutine. + Following fields are set: + * DBest best value of the D parameter * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y * MaxError maximum error NON-WEIGHTED ERRORS ARE CALCULATED -IMPORTANT: - this subroitine doesn't calculate task's condition number for K<>0. - -NOTE 1: additional nodes are added to the spline outside of the fitting -interval to force linearity when x<min(x,xc) or x>max(x,xc). It is done -for consistency - we penalize non-linearity at [min(x,xc),max(x,xc)], so -it is natural to force linearity outside of this interval. - -NOTE 2: function automatically sorts points, so caller may pass unsorted -array. - -- ALGLIB PROJECT -- Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dfitpenalized( - real_1d_array x, - real_1d_array y, - ae_int_t m, - double rho, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::spline1dfitpenalized( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t m, - double rho, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfitpenalized( - real_1d_array x, - real_1d_array y, - ae_int_t m, - double rho, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfitpenalized( +
    void alglib::barycentricfitfloaterhormann( real_1d_array x, real_1d_array y, ae_int_t n, ae_int_t m, - double rho, ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); + barycentricinterpolant& b, + barycentricfitreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  
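A hedged, self-contained sketch with made-up data: fitting a Floater-Hormann interpolant to samples of y=x^2 and evaluating it with barycentriccalc(), which is documented elsewhere in this manual; the interpolation.h header is assumed, as for the other fitting samples in this section.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // 11 samples of y = x^2 on [0,1]
    real_1d_array x = "[0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]";
    real_1d_array y = "[0.00,0.01,0.04,0.09,0.16,0.25,0.36,0.49,0.64,0.81,1.00]";
    ae_int_t info;
    barycentricinterpolant b;
    barycentricfitreport rep;

    // N=11 points, M=5 basis functions; optimal D is chosen automatically
    barycentricfitfloaterhormann(x, y, 11, 5, info, b, rep);
    printf("%d\n", int(info));                  // Info>0 means the task was solved
    printf("%.3f\n", barycentriccalc(b, 0.25)); // should be close to 0.0625
    return 0;
}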

    - +
     
    /************************************************************************* -Weighted fitting by penalized cubic spline. +Weghted rational least squares fitting using Floater-Hormann rational +functions with optimal D chosen from [0,9], with constraints and +individual weights. -Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to build -basis functions. Basis functions are cubic splines with natural boundary -conditions. Problem is regularized by adding non-linearity penalty to the -usual least squares penalty function: - - S(x) = arg min { LS + P }, where - LS = SUM { w[i]^2*(y[i] - S(x[i]))^2 } - least squares penalty - P = C*10^rho*integral{ S''(x)^2*dx } - non-linearity penalty - rho - tunable constant given by user - C - automatically determined scale parameter, - makes penalty invariant with respect to scaling of X, Y, W. +Equidistant grid with M node on [min(x),max(x)] is used to build basis +functions. Different values of D are tried, optimal D (least WEIGHTED root +mean square error) is chosen. Task is linear, so linear least squares +solver is used. Complexity of this computational scheme is O(N*M^2) +(mostly dominated by the least squares solver). -COMMERCIAL EDITION OF ALGLIB: +SEE ALSO +* BarycentricFitFloaterHormann(), "lightweight" fitting without invididual + weights and constraints. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Speed-up provided by multithreading greatly depends on problem size - ! - only large problems (number of coefficients is more than 500) can be - ! efficiently multithreaded. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -18764,30 +19395,31 @@ Each summand in square sum of approximation deviations from given values is multiplied by the square of corresponding weight. Fill it by 1's if you don't want to solve weighted - problem. - N - number of points (optional): - * N>0 - * if given, only first N elements of X/Y/W are processed - * if not given, automatically determined from X/Y/W sizes - M - number of basis functions ( = number_of_nodes), M>=4. - Rho - regularization constant passed by user. It penalizes - nonlinearity in the regression spline. It is logarithmically - scaled, i.e. actual value of regularization constant is - calculated as 10^Rho. It is automatically scaled so that: - * Rho=2.0 corresponds to moderate amount of nonlinearity - * generally, it should be somewhere in the [-8.0,+8.0] - If you do not want to penalize nonlineary, - pass small Rho. Values as low as -15 should work. + task. 
+ N - number of points, N>0. + XC - points where function values/derivatives are constrained, + array[0..K-1]. + YC - values of constraints, array[0..K-1] + DC - array[0..K-1], types of constraints: + * DC[i]=0 means that S(XC[i])=YC[i] + * DC[i]=1 means that S'(XC[i])=YC[i] + SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS + K - number of constraints, 0<=K<M. + K=0 means no constraints (XC/YC/DC are not used in such cases) + M - number of basis functions ( = number_of_nodes), M>=2. OUTPUT PARAMETERS: Info- same format as in LSFitLinearWC() subroutine. * Info>0 task is solved * Info<=0 an error occured: - -4 means inconvergence of internal SVD or - Cholesky decomposition; problem may be - too ill-conditioned (very rare) - S - spline interpolant. - Rep - Following fields are set: + -4 means inconvergence of internal SVD + -3 means inconsistent constraints + -1 means another errors in parameters passed + (N<=0, for example) + B - barycentric interpolant. + Rep - report, same format as in LSFitLinearWC() subroutine. + Following fields are set: + * DBest best value of the D parameter * RMSError rms error on the (X,Y). * AvgError average error on the (X,Y). * AvgRelError average relative error on the non-zero Y @@ -18795,5499 +19427,5879 @@ NON-WEIGHTED ERRORS ARE CALCULATED IMPORTANT: - this subroitine doesn't calculate task's condition number for K<>0. + this subroutine doesn't calculate task's condition number for K<>0. + +SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: -NOTE 1: additional nodes are added to the spline outside of the fitting -interval to force linearity when x<min(x,xc) or x>max(x,xc). It is done -for consistency - we penalize non-linearity at [min(x,xc),max(x,xc)], so -it is natural to force linearity outside of this interval. +Setting constraints can lead to undesired results, like ill-conditioned +behavior, or inconsistency being detected. From the other side, it allows +us to improve quality of the fit. Here we summarize our experience with +constrained barycentric interpolants: +* excessive constraints can be inconsistent. Floater-Hormann basis + functions aren't as flexible as splines (although they are very smooth). +* the more evenly constraints are spread across [min(x),max(x)], the more + chances that they will be consistent +* the greater is M (given fixed constraints), the more chances that + constraints will be consistent +* in the general case, consistency of constraints IS NOT GUARANTEED. +* in the several special cases, however, we CAN guarantee consistency. +* one of this cases is constraints on the function VALUES at the interval + boundaries. Note that consustency of the constraints on the function + DERIVATIVES is NOT guaranteed (you can use in such cases cubic splines + which are more flexible). +* another special case is ONE constraint on the function value (OR, but + not AND, derivative) anywhere in the interval -NOTE 2: function automatically sorts points, so caller may pass unsorted -array. +Our final recommendation is to use constraints WHEN AND ONLY WHEN you +can't solve your task without them. Anything beyond special cases given +above is not guaranteed and may result in inconsistency. -- ALGLIB PROJECT -- - Copyright 19.10.2010 by Bochkanov Sergey + Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dfitpenalizedw( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t m, - double rho, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::spline1dfitpenalizedw( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t n, - ae_int_t m, - double rho, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfitpenalizedw( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t m, - double rho, - ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); -void alglib::smp_spline1dfitpenalizedw( +
    void alglib::barycentricfitfloaterhormannwc( real_1d_array x, real_1d_array y, real_1d_array w, ae_int_t n, + real_1d_array xc, + real_1d_array yc, + integer_1d_array dc, + ae_int_t k, ae_int_t m, - double rho, ae_int_t& info, - spline1dinterpolant& s, - spline1dfitreport& rep); + barycentricinterpolant& b, + barycentricfitreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  
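Continuing the sketch above (hedged): the constrained variant additionally takes weights and the XC/YC/DC arrays; here a single constraint S(0)=0 pins the fit at the left boundary, one of the special cases with guaranteed consistency.

    real_1d_array w  = "[1,1,1,1,1,1,1,1,1,1,1]";
    real_1d_array xc = "[0.0]";
    real_1d_array yc = "[0.0]";
    integer_1d_array dc = "[0]";   // DC[0]=0 constrains the function value, not the derivative
    barycentricfitfloaterhormannwc(x, y, w, 11, xc, yc, dc, 1, 5, info, b, rep);
    printf("%d\n", int(info));     // Info>0 on success, -3 if constraints were inconsistent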

    - +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +
    /************************************************************************* +This function calculates value of four-parameter logistic (4PL) model at +specified point X. 4PL model has following form: -using namespace alglib; + F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) +INPUT PARAMETERS: + X - current point, X>=0: + * zero X is correctly handled even for B<=0 + * negative X results in exception. + A, B, C, D- parameters of 4PL model: + * A is unconstrained + * B is unconstrained; zero or negative values are handled + correctly. + * C>0, non-positive value results in exception + * D is unconstrained -int main(int argc, char **argv) -{ - // - // In this example we demonstrate linear fitting by f(x|a) = a*exp(0.5*x). - // - // We have: - // * y - vector of experimental data - // * fmatrix - matrix of basis functions calculated at sample points - // Actually, we have only one basis function F0 = exp(0.5*x). - // - real_2d_array fmatrix = "[[0.606531],[0.670320],[0.740818],[0.818731],[0.904837],[1.000000],[1.105171],[1.221403],[1.349859],[1.491825],[1.648721]]"; - real_1d_array y = "[1.133719, 1.306522, 1.504604, 1.554663, 1.884638, 2.072436, 2.257285, 2.534068, 2.622017, 2.897713, 3.219371]"; - ae_int_t info; - real_1d_array c; - lsfitreport rep; +RESULT: + model value at X - // - // Linear fitting without weights - // - lsfitlinear(y, fmatrix, info, c, rep); - printf("%d\n", int(info)); // EXPECTED: 1 - printf("%s\n", c.tostring(4).c_str()); // EXPECTED: [1.98650] +NOTE: if B=0, denominator is assumed to be equal to 2.0 even for zero X + (strictly speaking, 0^0 is undefined). - // - // Linear fitting with individual weights. - // Slightly different result is returned. - // - real_1d_array w = "[1.414213, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]"; - lsfitlinearw(y, w, fmatrix, info, c, rep); - printf("%d\n", int(info)); // EXPECTED: 1 - printf("%s\n", c.tostring(4).c_str()); // EXPECTED: [1.983354] - return 0; -} +NOTE: this function also throws exception if all input parameters are + correct, but overflow was detected during calculations. +NOTE: this function performs a lot of checks; if you need really high + performance, consider evaluating model yourself, without checking + for degenerate cases. -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
     
    -using namespace alglib;
    +  -- ALGLIB PROJECT --
    +     Copyright 14.05.2014 by Bochkanov Sergey
    +*************************************************************************/
    +
    double alglib::logisticcalc4( + double x, + double a, + double b, + double c, + double d, + const xparams _params = alglib::xdefault); +
    +

    Examples:   [1]  
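A small hedged check of the formula above, assuming (as for the other lsfit routines in this section) that the declaration lives in interpolation.h: with A=0, B=1, C=1, D=2 the model gives F(1) = 2 + (0-2)/(1+(1/1)^1) = 1.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // F(x|A,B,C,D) = D + (A-D)/(1+(x/C)^B)
    double v = logisticcalc4(1.0, 0.0, 1.0, 1.0, 2.0);
    printf("%.2f\n", v); // 1.00 expected from the closed form above
    return 0;
}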

    + +
    +
    /************************************************************************* +This function calculates value of five-parameter logistic (5PL) model at +specified point X. 5PL model has following form: -int main(int argc, char **argv) -{ - // - // In this example we demonstrate linear fitting by f(x|a,b) = a*x+b - // with simple constraint f(0)=0. - // - // We have: - // * y - vector of experimental data - // * fmatrix - matrix of basis functions sampled at [0,1] with step 0.2: - // [ 1.0 0.0 ] - // [ 1.0 0.2 ] - // [ 1.0 0.4 ] - // [ 1.0 0.6 ] - // [ 1.0 0.8 ] - // [ 1.0 1.0 ] - // first column contains value of first basis function (constant term) - // second column contains second basis function (linear term) - // * cmatrix - matrix of linear constraints: - // [ 1.0 0.0 0.0 ] - // first two columns contain coefficients before basis functions, - // last column contains desired value of their sum. - // So [1,0,0] means "1*constant_term + 0*linear_term = 0" - // - real_1d_array y = "[0.072436,0.246944,0.491263,0.522300,0.714064,0.921929]"; - real_2d_array fmatrix = "[[1,0.0],[1,0.2],[1,0.4],[1,0.6],[1,0.8],[1,1.0]]"; - real_2d_array cmatrix = "[[1,0,0]]"; - ae_int_t info; - real_1d_array c; - lsfitreport rep; - - // - // Constrained fitting without weights - // - lsfitlinearc(y, fmatrix, cmatrix, info, c, rep); - printf("%d\n", int(info)); // EXPECTED: 1 - printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [0,0.932933] - - // - // Constrained fitting with individual weights - // - real_1d_array w = "[1, 1.414213, 1, 1, 1, 1]"; - lsfitlinearwc(y, w, fmatrix, cmatrix, info, c, rep); - printf("%d\n", int(info)); // EXPECTED: 1 - printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [0,0.938322] - return 0; -} + F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) +INPUT PARAMETERS: + X - current point, X>=0: + * zero X is correctly handled even for B<=0 + * negative X results in exception. + A, B, C, D, G- parameters of 5PL model: + * A is unconstrained + * B is unconstrained; zero or negative values are handled + correctly. + * C>0, non-positive value results in exception + * D is unconstrained + * G>0, non-positive value results in exception -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +RESULT:
    +    model value at X
     
    -using namespace alglib;
    -void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr) 
    -{
    -    // this callback calculates f(c,x)=exp(-c0*sqr(x0))
    -    // where x is a position on X-axis and c is adjustable parameter
    -    func = exp(-c[0]*pow(x[0],2));
    -}
    +NOTE: if B=0, denominator is assumed to be equal to Power(2.0,G) even  for
    +      zero X (strictly speaking, 0^0 is undefined).
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // In this example we demonstrate exponential fitting
    -    // by f(x) = exp(-c*x^2)
    -    // using function value only.
    -    //
    -    // Gradient is estimated using combination of numerical differences
    -    // and secant updates. diffstep variable stores differentiation step 
    -    // (we have to tell algorithm what step to use).
    -    //
    -    real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]";
    -    real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]";
    -    real_1d_array c = "[0.3]";
    -    double epsf = 0;
    -    double epsx = 0.000001;
    -    ae_int_t maxits = 0;
    -    ae_int_t info;
    -    lsfitstate state;
    -    lsfitreport rep;
    -    double diffstep = 0.0001;
    +NOTE: this function also throws exception  if  all  input  parameters  are
    +      correct, but overflow was detected during calculations.
     
    -    //
    -    // Fitting without weights
    -    //
    -    lsfitcreatef(x, y, c, diffstep, state);
    -    lsfitsetcond(state, epsf, epsx, maxits);
    -    alglib::lsfitfit(state, function_cx_1_func);
    -    lsfitresults(state, info, c, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 2
    -    printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5]
    +NOTE: this function performs a lot of checks;  if  you  need  really  high
    +      performance, consider evaluating model  yourself,  without  checking
    +      for degenerate cases.
     
    -    //
    -    // Fitting with weights
    -    // (you can change weights and see how it changes result)
    -    //
    -    real_1d_array w = "[1,1,1,1,1,1,1,1,1,1,1]";
    -    lsfitcreatewf(x, y, w, c, diffstep, state);
    -    lsfitsetcond(state, epsf, epsx, maxits);
    -    alglib::lsfitfit(state, function_cx_1_func);
    -    lsfitresults(state, info, c, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 2
    -    printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5]
    -    return 0;
    -}
     
    +  -- ALGLIB PROJECT --
    +     Copyright 14.05.2014 by Bochkanov Sergey
    +*************************************************************************/
    +
    double alglib::logisticcalc5( + double x, + double a, + double b, + double c, + double d, + double g, + const xparams _params = alglib::xdefault); -
    + +

    Examples:   [1]  
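The same hedged check for the 5PL form, extending the 4PL snippet above: with A=0, B=1, C=1, D=2, G=2 the denominator (1+(x/C)^B)^G equals 4 at x=1, so F(1) = 2 + (0-2)/4 = 1.5.

    // F(x|A,B,C,D,G) = D + (A-D)/Power(1+Power(x/C,B),G)
    double v5 = logisticcalc5(1.0, 0.0, 1.0, 1.0, 2.0, 2.0);
    printf("%.2f\n", v5); // 1.50 expected from the closed form above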

    +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    -void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr) 
    -{
    -    // this callback calculates f(c,x)=exp(-c0*sqr(x0))
    -    // where x is a position on X-axis and c is adjustable parameter
    -    func = exp(-c[0]*pow(x[0],2));
    -}
    +
    /************************************************************************* +This function fits four-parameter logistic (4PL) model to data provided +by user. 4PL model has following form: -int main(int argc, char **argv) -{ - // - // In this example we demonstrate exponential fitting by - // f(x) = exp(-c*x^2) - // subject to bound constraints - // 0.0 <= c <= 1.0 - // using function value only. - // - // Gradient is estimated using combination of numerical differences - // and secant updates. diffstep variable stores differentiation step - // (we have to tell algorithm what step to use). - // - // Unconstrained solution is c=1.5, but because of constraints we should - // get c=1.0 (at the boundary). - // - real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]"; - real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]"; - real_1d_array c = "[0.3]"; - real_1d_array bndl = "[0.0]"; - real_1d_array bndu = "[1.0]"; - double epsf = 0; - double epsx = 0.000001; - ae_int_t maxits = 0; - ae_int_t info; - lsfitstate state; - lsfitreport rep; - double diffstep = 0.0001; + F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B)) - lsfitcreatef(x, y, c, diffstep, state); - lsfitsetbc(state, bndl, bndu); - lsfitsetcond(state, epsf, epsx, maxits); - alglib::lsfitfit(state, function_cx_1_func); - lsfitresults(state, info, c, rep); - printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.0] - return 0; -} +Here: + * A, D - unconstrained (see LogisticFit4EC() for constrained 4PL) + * B>=0 + * C>0 +IMPORTANT: output of this function is constrained in such way that B>0. + Because 4PL model is symmetric with respect to B, there is no + need to explore B<0. Constraining B makes algorithm easier + to stabilize and debug. + Users who for some reason prefer to work with negative B's + should transform output themselves (swap A and D, replace B by + -B). -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +4PL fitting is implemented as follows:
    +* we perform small number of restarts from random locations which helps to
    +  solve problem of bad local extrema. Locations are only partially  random
    +  - we use input data to determine good  initial  guess,  but  we  include
    +  controlled amount of randomness.
    +* we perform Levenberg-Marquardt fitting with very  tight  constraints  on
    +  parameters B and C - it allows us to find good  initial  guess  for  the
    +  second stage without risk of running into "flat spot".
    +* second  Levenberg-Marquardt  round  is   performed   without   excessive
    +  constraints. Results from the previous round are used as initial guess.
    +* after fitting is done, we compare results with best values found so far,
    +  rewrite "best solution" if needed, and move to next random location.
     
    -using namespace alglib;
    -void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr) 
    -{
    -    // this callback calculates f(c,x)=exp(-c0*sqr(x0))
    -    // where x is a position on X-axis and c is adjustable parameter
    -    func = exp(-c[0]*pow(x[0],2));
    -}
    -void function_cx_1_grad(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
    -{
    -    // this callback calculates f(c,x)=exp(-c0*sqr(x0)) and gradient G={df/dc[i]}
    -    // where x is a position on X-axis and c is adjustable parameter.
    -    // IMPORTANT: gradient is calculated with respect to C, not to X
    -    func = exp(-c[0]*pow(x[0],2));
    -    grad[0] = -pow(x[0],2)*func;
    -}
    +Overall algorithm is very stable and is not prone to  bad  local  extrema.
    +Furthermore, it automatically scales when input data have  very  large  or
    +very small range.
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // In this example we demonstrate exponential fitting
    -    // by f(x) = exp(-c*x^2)
    -    // using function value and gradient (with respect to c).
    -    //
    -    real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]";
    -    real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]";
    -    real_1d_array c = "[0.3]";
    -    double epsf = 0;
    -    double epsx = 0.000001;
    -    ae_int_t maxits = 0;
    -    ae_int_t info;
    -    lsfitstate state;
    -    lsfitreport rep;
    +INPUT PARAMETERS:
    +    X       -   array[N], stores X-values.
    +                MUST include only non-negative numbers  (but  may  include
    +                zero values). Can be unsorted.
    +    Y       -   array[N], values to fit.
    +    N       -   number of points. If N is less than  length  of  X/Y, only
    +                leading N elements are used.
     
    -    //
    -    // Fitting without weights
    -    //
    -    lsfitcreatefg(x, y, c, true, state);
    -    lsfitsetcond(state, epsf, epsx, maxits);
    -    alglib::lsfitfit(state, function_cx_1_func, function_cx_1_grad);
    -    lsfitresults(state, info, c, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 2
    -    printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5]
    +OUTPUT PARAMETERS:
    +    A, B, C, D- parameters of 4PL model
    +    Rep     -   fitting report. This structure has many fields,  but  ONLY
    +                ONES LISTED BELOW ARE SET:
    +                * Rep.IterationsCount - number of iterations performed
    +                * Rep.RMSError - root-mean-square error
    +                * Rep.AvgError - average absolute error
    +                * Rep.AvgRelError - average relative error (calculated for
    +                  non-zero Y-values)
    +                * Rep.MaxError - maximum absolute error
    +                * Rep.R2 - coefficient of determination,  R-squared.  This
    +                  coefficient   is  calculated  as  R2=1-RSS/TSS  (in case
    +                  of nonlinear  regression  there  are  multiple  ways  to
    +                  define R2, each of them giving different results).
     
    -    //
    -    // Fitting with weights
    -    // (you can change weights and see how it changes result)
    -    //
    -    real_1d_array w = "[1,1,1,1,1,1,1,1,1,1,1]";
    -    lsfitcreatewfg(x, y, w, c, true, state);
    -    lsfitsetcond(state, epsf, epsx, maxits);
    -    alglib::lsfitfit(state, function_cx_1_func, function_cx_1_grad);
    -    lsfitresults(state, info, c, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 2
    -    printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5]
    -    return 0;
    -}
    +NOTE: for stability reasons the B parameter is restricted to the range
    +      [1/1000,1000]. This prevents the algorithm from making trial steps
    +      deep into the area of bad parameters.
     
    +NOTE: after  you  obtained  coefficients,  you  can  evaluate  model  with
    +      LogisticCalc4() function.
     
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +NOTE: if you need better control over fitting process than provided by this
    +      function, you may use LogisticFit45X().
     
    -using namespace alglib;
    -void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr) 
    -{
    -    // this callback calculates f(c,x)=exp(-c0*sqr(x0))
    -    // where x is a position on X-axis and c is adjustable parameter
    -    func = exp(-c[0]*pow(x[0],2));
    -}
    -void function_cx_1_grad(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
    -{
    -    // this callback calculates f(c,x)=exp(-c0*sqr(x0)) and gradient G={df/dc[i]}
    -    // where x is a position on X-axis and c is adjustable parameter.
    -    // IMPORTANT: gradient is calculated with respect to C, not to X
    -    func = exp(-c[0]*pow(x[0],2));
    -    grad[0] = -pow(x[0],2)*func;
    -}
    -void function_cx_1_hess(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr) 
    -{
    -    // this callback calculates f(c,x)=exp(-c0*sqr(x0)), gradient G={df/dc[i]} and Hessian H={d2f/(dc[i]*dc[j])}
    -    // where x is a position on X-axis and c is adjustable parameter.
    -    // IMPORTANT: gradient/Hessian are calculated with respect to C, not to X
    -    func = exp(-c[0]*pow(x[0],2));
    -    grad[0] = -pow(x[0],2)*func;
    -    hess[0][0] = pow(x[0],4)*func;
    -}
    +NOTE: step is automatically scaled according to scale of parameters  being
    +      fitted before we compare its length with EpsX. Thus,  this  function
    +      can be used to fit data with very small or very large values without
    +      changing EpsX.
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // In this example we demonstrate exponential fitting
    -    // by f(x) = exp(-c*x^2)
    -    // using function value, gradient and Hessian (with respect to c)
    -    //
    -    real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]";
    -    real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]";
    -    real_1d_array c = "[0.3]";
    -    double epsf = 0;
    -    double epsx = 0.000001;
    -    ae_int_t maxits = 0;
    -    ae_int_t info;
    -    lsfitstate state;
    -    lsfitreport rep;
     
    -    //
    -    // Fitting without weights
    -    //
    -    lsfitcreatefgh(x, y, c, state);
    -    lsfitsetcond(state, epsf, epsx, maxits);
    -    alglib::lsfitfit(state, function_cx_1_func, function_cx_1_grad, function_cx_1_hess);
    -    lsfitresults(state, info, c, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 2
    -    printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5]
    +  -- ALGLIB PROJECT --
    +     Copyright 14.02.2014 by Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::logisticfit4(
    +    real_1d_array x,
    +    real_1d_array y,
    +    ae_int_t n,
    +    double& a,
    +    double& b,
    +    double& c,
    +    double& d,
    +    lsfitreport& rep,
    +    const xparams _params = alglib::xdefault);
    -    //
    -    // Fitting with weights
    -    // (you can change weights and see how it changes result)
    -    //
    -    real_1d_array w = "[1,1,1,1,1,1,1,1,1,1,1]";
    -    lsfitcreatewfgh(x, y, w, c, state);
    -    lsfitsetcond(state, epsf, epsx, maxits);
    -    alglib::lsfitfit(state, function_cx_1_func, function_cx_1_grad, function_cx_1_hess);
    -    lsfitresults(state, info, c, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 2
    -    printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5]
    -    return 0;
    -}
    +
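For quick orientation, a minimal call sketch for the logisticfit4() declaration above. It is not part of the upstream manual or of this Debian diff; it reuses the data and approximate expected values from the logisticfit4 example removed further down in this file, and assumes the ALGLIB 3.16 C++ headers.

        #include <stdio.h>
        #include "interpolation.h"

        using namespace alglib;

        int main(int argc, char **argv)
        {
            // data from the removed logisticfit4 example; X must be non-negative
            real_1d_array x = "[1,2,3,4,5,6,7,8]";
            real_1d_array y = "[0.06313223,0.44552624,0.61838364,0.71385108,0.77345838,0.81383140,0.84280033,0.86449822]";
            double a, b, c, d;
            lsfitreport rep;

            // fit 4PL model F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B))
            logisticfit4(x, y, 8, a, b, c, d, rep);
            printf("%.1f %.1f %.1f %.1f\n", a, b, c, d); // removed example expected -1.0 1.2 0.9 1.0
            printf("%.3f\n", rep.r2);                    // only the report fields listed above are set

            // evaluate the fitted model at x=0.5 (see the LogisticCalc4 note above)
            printf("%.2f\n", logisticcalc4(0.5, a, b, c, d));
            return 0;
        }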
    +

    Examples:   [1]  

    +
    +
    +
    /*************************************************************************
    +This is "expert" 4PL/5PL fitting function, which can be used if you need
    +better control over fitting process than provided by LogisticFit4() or
    +LogisticFit5().
    +This function fits model of the form
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +    F(x|A,B,C,D)   = D+(A-D)/(1+Power(x/C,B))           (4PL model)
     
    -using namespace alglib;
    -void function_debt_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr) 
    -{
    -    //
    -    // this callback calculates f(c,x)=c[0]*(1+c[1]*(pow(x[0]-1999,c[2])-1))
    -    //
    -    func = c[0]*(1+c[1]*(pow(x[0]-1999,c[2])-1));
    -}
    +or
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // In this example we demonstrate fitting by
    -    //     f(x) = c[0]*(1+c[1]*((x-1999)^c[2]-1))
    -    // subject to bound constraints
    -    //     -INF  < c[0] < +INF
    -    //      -10 <= c[1] <= +10
    -    //      0.1 <= c[2] <= 2.0
    -    // Data we want to fit are time series of Japan national debt
    -    // collected from 2000 to 2008 measured in USD (dollars, not
    -    // millions of dollars).
    -    //
    -    // Our variables are:
    -    //     c[0] - debt value at initial moment (2000),
    -    //     c[1] - direction coefficient (growth or decrease),
    -    //     c[2] - curvature coefficient.
    -    // You may see that our variables are badly scaled - first one 
    -    // is order of 10^12, and next two are somewhere about 1 in 
    -    // magnitude. Such problem is difficult to solve without some
    -    // kind of scaling.
    -    // That is exactly where lsfitsetscale() function can be used.
    -    // We set scale of our variables to [1.0E12, 1, 1], which allows
    -    // us to easily solve this problem.
    -    //
    -    // You can try commenting out lsfitsetscale() call - and you will 
    -    // see that algorithm will fail to converge.
    -    //
    -    real_2d_array x = "[[2000],[2001],[2002],[2003],[2004],[2005],[2006],[2007],[2008]]";
    -    real_1d_array y = "[4323239600000.0, 4560913100000.0, 5564091500000.0, 6743189300000.0, 7284064600000.0, 7050129600000.0, 7092221500000.0, 8483907600000.0, 8625804400000.0]";
    -    real_1d_array c = "[1.0e+13, 1, 1]";
    -    double epsf = 0;
    -    double epsx = 1.0e-5;
    -    real_1d_array bndl = "[-inf, -10, 0.1]";
    -    real_1d_array bndu = "[+inf, +10, 2.0]";
    -    real_1d_array s = "[1.0e+12, 1, 1]";
    -    ae_int_t maxits = 0;
    -    ae_int_t info;
    -    lsfitstate state;
    -    lsfitreport rep;
    -    double diffstep = 1.0e-5;
    +    F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G)    (5PL model)
     
    -    lsfitcreatef(x, y, c, diffstep, state);
    -    lsfitsetcond(state, epsf, epsx, maxits);
    -    lsfitsetbc(state, bndl, bndu);
    -    lsfitsetscale(state, s);
    -    alglib::lsfitfit(state, function_debt_func);
    -    lsfitresults(state, info, c, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 2
    -    printf("%s\n", c.tostring(-2).c_str()); // EXPECTED: [4.142560E+12, 0.434240, 0.565376]
    -    return 0;
    -}
    +Here:
    +    * A, D - unconstrained
    +    * B>=0 for 4PL, unconstrained for 5PL
    +    * C>0
    +    * G>0 (if present)
     
    +INPUT PARAMETERS:
    +    X       -   array[N], stores X-values.
    +                MUST include only non-negative numbers  (but  may  include
    +                zero values). Can be unsorted.
    +    Y       -   array[N], values to fit.
    +    N       -   number of points. If N is less than  length  of  X/Y, only
    +                leading N elements are used.
    +    CnstrLeft-  optional equality constraint for model value at the   left
    +                boundary (at X=0). Specify NAN (Not-a-Number)  if  you  do
    +                not need constraint on the model value at X=0 (in C++  you
    +                can pass alglib::fp_nan as parameter, in  C#  it  will  be
    +                Double.NaN).
    +                See  below,  section  "EQUALITY  CONSTRAINTS"   for   more
    +                information about constraints.
    +    CnstrRight- optional equality constraint for model value at X=infinity.
    +                Specify NAN (Not-a-Number) if you do not  need  constraint
    +                on the model value (in C++  you can pass alglib::fp_nan as
    +                parameter, in  C# it will  be Double.NaN).
    +                See  below,  section  "EQUALITY  CONSTRAINTS"   for   more
    +                information about constraints.
    +    Is4PL   -   whether 4PL or 5PL models are fitted
    +    LambdaV -   regularization coefficient, LambdaV>=0.
    +                Set it to zero unless you know what you are doing.
    +    EpsX    -   stopping condition (step size), EpsX>=0.
    +                Zero value means that small step is automatically chosen.
    +                See notes below for more information.
    +    RsCnt   -   number of repeated restarts from  random  points.  4PL/5PL
    +                models are prone to problem of bad local extrema. Utilizing
    +                multiple random restarts allows  us  to  improve algorithm
    +                convergence.
    +                RsCnt>=0.
    +                A zero value means that the function automatically chooses
    +                a small number of restarts (recommended).
     
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +OUTPUT PARAMETERS:
    +    A, B, C, D- parameters of 4PL model
    +    G       -   parameter of 5PL model; for Is4PL=True, G=1 is returned.
    +    Rep     -   fitting report. This structure has many fields,  but  ONLY
    +                ONES LISTED BELOW ARE SET:
    +                * Rep.IterationsCount - number of iterations performed
    +                * Rep.RMSError - root-mean-square error
    +                * Rep.AvgError - average absolute error
    +                * Rep.AvgRelError - average relative error (calculated for
    +                  non-zero Y-values)
    +                * Rep.MaxError - maximum absolute error
    +                * Rep.R2 - coefficient of determination,  R-squared.  This
    +                  coefficient   is  calculated  as  R2=1-RSS/TSS  (in case
    +                  of nonlinear  regression  there  are  multiple  ways  to
    +                  define R2, each of them giving different results).
     
    -using namespace alglib;
    +NOTE: for better stability B  parameter is restricted by [+-1/1000,+-1000]
    +      range, and G is restricted by [1/10,10] range. It prevents algorithm
    +      from making trial steps deep into the area of bad parameters.
     
    +NOTE: after  you  obtained  coefficients,  you  can  evaluate  model  with
    +      LogisticCalc5() function.
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates polynomial fitting.
    -    //
    -    // Fitting is done by two (M=2) functions from polynomial basis:
    -    //     f0 = 1
    -    //     f1 = x
    -    // Basically, it just a linear fit; more complex polynomials may be used
    -    // (e.g. parabolas with M=3, cubic with M=4), but even such simple fit allows
    -    // us to demonstrate polynomialfit() function in action.
    -    //
    -    // We have:
    -    // * x      set of abscissas
    -    // * y      experimental data
    -    //
    -    // Additionally we demonstrate weighted fitting, where second point has
    -    // more weight than other ones.
    -    //
    -    real_1d_array x = "[0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]";
    -    real_1d_array y = "[0.00,0.05,0.26,0.32,0.33,0.43,0.60,0.60,0.77,0.98,1.02]";
    -    ae_int_t m = 2;
    -    double t = 2;
    -    ae_int_t info;
    -    barycentricinterpolant p;
    -    polynomialfitreport rep;
    -    double v;
    -
    -    //
    -    // Fitting without individual weights
    -    //
    -    // NOTE: result is returned as barycentricinterpolant structure.
    -    //       if you want to get representation in the power basis,
    -    //       you can use barycentricbar2pow() function to convert
    -    //       from barycentric to power representation (see docs for 
    -    //       POLINT subpackage for more info).
    -    //
    -    polynomialfit(x, y, m, info, p, rep);
    -    v = barycentriccalc(p, t);
    -    printf("%.2f\n", double(v)); // EXPECTED: 2.011
    -
    -    //
    -    // Fitting with individual weights
    -    //
    -    // NOTE: slightly different result is returned
    -    //
    -    real_1d_array w = "[1,1.414213562,1,1,1,1,1,1,1,1,1]";
    -    real_1d_array xc = "[]";
    -    real_1d_array yc = "[]";
    -    integer_1d_array dc = "[]";
    -    polynomialfitwc(x, y, w, xc, yc, dc, m, info, p, rep);
    -    v = barycentriccalc(p, t);
    -    printf("%.2f\n", double(v)); // EXPECTED: 2.023
    -    return 0;
    -}
    +NOTE: step is automatically scaled according to scale of parameters  being
    +      fitted before we compare its length with EpsX. Thus,  this  function
    +      can be used to fit data with very small or very large values without
    +      changing EpsX.
     
    +EQUALITY CONSTRAINTS ON PARAMETERS
     
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +4PL/5PL solver supports equality constraints on model values at  the  left
    +boundary (X=0) and right  boundary  (X=infinity).  These  constraints  are
    +completely optional and you can specify both of them, only  one  -  or  no
    +constraints at all.
     
    -using namespace alglib;
    +Parameter  CnstrLeft  contains  left  constraint (or NAN for unconstrained
    +fitting), and CnstrRight contains right  one.  For  4PL,  left  constraint
    +ALWAYS corresponds to parameter A, and right one is ALWAYS  constraint  on
    +D. That's because 4PL model is normalized in such way that B>=0.
     
    +For 5PL model things are different. Unlike  4PL  one,  5PL  model  is  NOT
    +symmetric with respect to  change  in  sign  of  B. Thus, negative B's are
    +possible, and left constraint may constrain parameter A (for positive B's)
    +- or parameter D (for negative B's). The meaning of the right constraint
    +changes in the same way.
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates polynomial fitting.
    -    //
    -    // Fitting is done by two (M=2) functions from polynomial basis:
    -    //     f0 = 1
    -    //     f1 = x
    -    // with simple constraint on function value
    -    //     f(0) = 0
    -    // Basically, it just a linear fit; more complex polynomials may be used
    -    // (e.g. parabolas with M=3, cubic with M=4), but even such simple fit allows
    -    // us to demonstrate polynomialfit() function in action.
    -    //
    -    // We have:
    -    // * x      set of abscissas
    -    // * y      experimental data
    -    // * xc     points where constraints are placed
    -    // * yc     constraints on derivatives
    -    // * dc     derivative indices
    -    //          (0 means function itself, 1 means first derivative)
    -    //
    -    real_1d_array x = "[1.0,1.0]";
    -    real_1d_array y = "[0.9,1.1]";
    -    real_1d_array w = "[1,1]";
    -    real_1d_array xc = "[0]";
    -    real_1d_array yc = "[0]";
    -    integer_1d_array dc = "[0]";
    -    double t = 2;
    -    ae_int_t m = 2;
    -    ae_int_t info;
    -    barycentricinterpolant p;
    -    polynomialfitreport rep;
    -    double v;
    +You do not have to decide which parameter to constrain - the algorithm will
    +automatically determine the correct parameter as fitting progresses. However,
    +the distinction highlighted above is important when you interpret the fitting
    +results.
     
    -    polynomialfitwc(x, y, w, xc, yc, dc, m, info, p, rep);
    -    v = barycentriccalc(p, t);
    -    printf("%.2f\n", double(v)); // EXPECTED: 2.000
    -    return 0;
    -}
     
    +  -- ALGLIB PROJECT --
    +     Copyright 14.02.2014 by Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::logisticfit45x(
    +    real_1d_array x,
    +    real_1d_array y,
    +    ae_int_t n,
    +    double cnstrleft,
    +    double cnstrright,
    +    bool is4pl,
    +    double lambdav,
    +    double epsx,
    +    ae_int_t rscnt,
    +    double& a,
    +    double& b,
    +    double& c,
    +    double& d,
    +    double& g,
    +    lsfitreport& rep,
    +    const xparams _params = alglib::xdefault);
    -
    +
    +
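A hedged sketch of the expert logisticfit45x() call declared above, not taken from the upstream manual: an unconstrained 4PL fit with automatic EpsX and restart count, on the same data as the removed logisticfit4 example. alglib::fp_nan is the documented way to leave a boundary value unconstrained; the zero values for LambdaV, EpsX and RsCnt request the documented defaults.

        #include <stdio.h>
        #include "interpolation.h"

        using namespace alglib;

        int main(int argc, char **argv)
        {
            real_1d_array x = "[1,2,3,4,5,6,7,8]";
            real_1d_array y = "[0.06313223,0.44552624,0.61838364,0.71385108,0.77345838,0.81383140,0.84280033,0.86449822]";
            double a, b, c, d, g;
            lsfitreport rep;

            // 4PL fit (is4pl=true), both boundary values unconstrained (fp_nan),
            // no regularization, automatic EpsX (0) and automatic restart count (0)
            logisticfit45x(x, y, 8, fp_nan, fp_nan, true, 0.0, 0.0, 0, a, b, c, d, g, rep);
            printf("%.1f %.1f %.1f %.1f %.1f\n", a, b, c, d, g); // for Is4PL=True, G=1 is returned
            return 0;
        }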
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // In this example we demonstrate penalized spline fitting of noisy data
    -    //
    -    // We have:
    -    // * x - abscissas
    -    // * y - vector of experimental data, straight line with small noise
    -    //
    -    real_1d_array x = "[0.00,0.10,0.20,0.30,0.40,0.50,0.60,0.70,0.80,0.90]";
    -    real_1d_array y = "[0.10,0.00,0.30,0.40,0.30,0.40,0.62,0.68,0.75,0.95]";
    -    ae_int_t info;
    -    double v;
    -    spline1dinterpolant s;
    -    spline1dfitreport rep;
    -    double rho;
    -
    -    //
    -    // Fit with VERY small amount of smoothing (rho = -5.0)
    -    // and large number of basis functions (M=50).
    -    //
    -    // With such small regularization penalized spline almost fully reproduces function values
    -    //
    -    rho = -5.0;
    -    spline1dfitpenalized(x, y, 50, rho, info, s, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 1
    -    v = spline1dcalc(s, 0.0);
    -    printf("%.1f\n", double(v)); // EXPECTED: 0.10
    -
    -    //
    -    // Fit with VERY large amount of smoothing (rho = 10.0)
    -    // and large number of basis functions (M=50).
    -    //
    -    // With such regularization our spline should become close to the straight line fit.
    -    // We will compare its value in x=1.0 with results obtained from such fit.
    -    //
    -    rho = +10.0;
    -    spline1dfitpenalized(x, y, 50, rho, info, s, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 1
    -    v = spline1dcalc(s, 1.0);
    -    printf("%.2f\n", double(v)); // EXPECTED: 0.969
    +
    /*************************************************************************
    +This function fits four-parameter logistic (4PL) model to data provided
    +by user, with optional constraints on parameters A and D. 4PL model has
    +following form:
    -    //
    -    // In real life applications you may need some moderate degree of fitting,
    -    // so we try to fit once more with rho=3.0.
    -    //
    -    rho = +3.0;
    -    spline1dfitpenalized(x, y, 50, rho, info, s, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 1
    -    return 0;
    -}
    +    F(x|A,B,C,D) = D+(A-D)/(1+Power(x/C,B))
    +Here:
    +    * A, D - with optional equality constraints
    +    * B>=0
    +    * C>0
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +IMPORTANT: output of this function is constrained in  such  way that  B>0.
    +           Because 4PL model is symmetric with respect to B, there  is  no
    +           need to explore  B<0.  Constraining  B  makes  algorithm easier
    +           to stabilize and debug.
    +           Users  who  for  some  reason  prefer to work with negative B's
    +           should transform output themselves (swap A and D, replace B  by
    +           -B).
     
    -using namespace alglib;
    +4PL fitting is implemented as follows:
    +* we perform small number of restarts from random locations which helps to
    +  solve problem of bad local extrema. Locations are only partially  random
    +  - we use input data to determine good  initial  guess,  but  we  include
    +  controlled amount of randomness.
    +* we perform Levenberg-Marquardt fitting with very  tight  constraints  on
    +  parameters B and C - it allows us to find good  initial  guess  for  the
    +  second stage without risk of running into "flat spot".
    +* second  Levenberg-Marquardt  round  is   performed   without   excessive
    +  constraints. Results from the previous round are used as initial guess.
    +* after fitting is done, we compare results with best values found so far,
    +  rewrite "best solution" if needed, and move to next random location.
     
    +The overall algorithm is very stable and is not prone to bad local extrema.
    +Furthermore, it automatically adapts to the scale of the input data, so it
    +works even when the data have a very large or a very small range.
     
    -int main(int argc, char **argv)
    -{
    -    real_1d_array x = "[1,2,3,4,5,6,7,8]";
    -    real_1d_array y = "[0.06313223,0.44552624,0.61838364,0.71385108,0.77345838,0.81383140,0.84280033,0.86449822]";
    -    ae_int_t n = 8;
    -    double a;
    -    double b;
    -    double c;
    -    double d;
    -    lsfitreport rep;
    +INPUT PARAMETERS:
    +    X       -   array[N], stores X-values.
    +                MUST include only non-negative numbers  (but  may  include
    +                zero values). Can be unsorted.
    +    Y       -   array[N], values to fit.
    +    N       -   number of points. If N is less than  length  of  X/Y, only
    +                leading N elements are used.
    +    CnstrLeft-  optional equality constraint for model value at the   left
    +                boundary (at X=0). Specify NAN (Not-a-Number)  if  you  do
    +                not need constraint on the model value at X=0 (in C++  you
    +                can pass alglib::fp_nan as parameter, in  C#  it  will  be
    +                Double.NaN).
    +                See  below,  section  "EQUALITY  CONSTRAINTS"   for   more
    +                information about constraints.
    +    CnstrRight- optional equality constraint for model value at X=infinity.
    +                Specify NAN (Not-a-Number) if you do not  need  constraint
    +                on the model value (in C++  you can pass alglib::fp_nan as
    +                parameter, in  C# it will  be Double.NaN).
    +                See  below,  section  "EQUALITY  CONSTRAINTS"   for   more
    +                information about constraints.
     
    -    //
    -    // Test logisticfit4() on carefully designed data with a priori known answer.
    -    //
    -    logisticfit4(x, y, n, a, b, c, d, rep);
    -    printf("%.1f\n", double(a)); // EXPECTED: -1.000
    -    printf("%.1f\n", double(b)); // EXPECTED: 1.200
    -    printf("%.1f\n", double(c)); // EXPECTED: 0.900
    -    printf("%.1f\n", double(d)); // EXPECTED: 1.000
    +OUTPUT PARAMETERS:
    +    A, B, C, D- parameters of 4PL model
    +    Rep     -   fitting report. This structure has many fields,  but  ONLY
    +                ONES LISTED BELOW ARE SET:
    +                * Rep.IterationsCount - number of iterations performed
    +                * Rep.RMSError - root-mean-square error
    +                * Rep.AvgError - average absolute error
    +                * Rep.AvgRelError - average relative error (calculated for
    +                  non-zero Y-values)
    +                * Rep.MaxError - maximum absolute error
    +                * Rep.R2 - coefficient of determination,  R-squared.  This
    +                  coefficient   is  calculated  as  R2=1-RSS/TSS  (in case
    +                  of nonlinear  regression  there  are  multiple  ways  to
    +                  define R2, each of them giving different results).
     
    -    //
    -    // Evaluate model at point x=0.5
    -    //
    -    double v;
    -    v = logisticcalc4(0.5, a, b, c, d);
    -    printf("%.2f\n", double(v)); // EXPECTED: -0.33874308
    -    return 0;
    -}
    +NOTE: for stability reasons the B parameter is restricted to the range
    +      [1/1000,1000]. This prevents the algorithm from making trial steps
    +      deep into the area of bad parameters.
     
    +NOTE: after  you  obtained  coefficients,  you  can  evaluate  model  with
    +      LogisticCalc4() function.
     
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +NOTE: if you need better control over fitting process than provided by this
    +      function, you may use LogisticFit45X().
     
    -using namespace alglib;
    +NOTE: step is automatically scaled according to scale of parameters  being
    +      fitted before we compare its length with EpsX. Thus,  this  function
    +      can be used to fit data with very small or very large values without
    +      changing EpsX.
     
    +EQUALITY CONSTRAINTS ON PARAMETERS
     
    -int main(int argc, char **argv)
    -{
    -    real_1d_array x = "[1,2,3,4,5,6,7,8]";
    -    real_1d_array y = "[0.1949776139,0.5710060208,0.726002637,0.8060434158,0.8534547965,0.8842071579,0.9054773317,0.9209088299]";
    -    ae_int_t n = 8;
    -    double a;
    -    double b;
    -    double c;
    -    double d;
    -    double g;
    -    lsfitreport rep;
    +4PL/5PL solver supports equality constraints on model values at  the  left
    +boundary (X=0) and right  boundary  (X=infinity).  These  constraints  are
    +completely optional and you can specify both of them, only  one  -  or  no
    +constraints at all.
     
    -    //
    -    // Test logisticfit5() on carefully designed data with a priori known answer.
    -    //
    -    logisticfit5(x, y, n, a, b, c, d, g, rep);
    -    printf("%.1f\n", double(a)); // EXPECTED: -1.000
    -    printf("%.1f\n", double(b)); // EXPECTED: 1.200
    -    printf("%.1f\n", double(c)); // EXPECTED: 0.900
    -    printf("%.1f\n", double(d)); // EXPECTED: 1.000
    -    printf("%.1f\n", double(g)); // EXPECTED: 1.200
    +Parameter  CnstrLeft  contains  left  constraint (or NAN for unconstrained
    +fitting), and CnstrRight contains right  one.  For  4PL,  left  constraint
    +ALWAYS corresponds to parameter A, and right one is ALWAYS  constraint  on
    +D. That's because 4PL model is normalized in such way that B>=0.
     
    -    //
    -    // Evaluate model at point x=0.5
    -    //
    -    double v;
    -    v = logisticcalc5(0.5, a, b, c, d, g);
    -    printf("%.2f\n", double(v)); // EXPECTED: -0.2354656824
    -    return 0;
    -}
     
    +  -- ALGLIB PROJECT --
    +     Copyright 14.02.2014 by Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::logisticfit4ec(
    +    real_1d_array x,
    +    real_1d_array y,
    +    ae_int_t n,
    +    double cnstrleft,
    +    double cnstrright,
    +    double& a,
    +    double& b,
    +    double& c,
    +    double& d,
    +    lsfitreport& rep,
    +    const xparams _params = alglib::xdefault);
    -
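An illustrative sketch of constrained 4PL fitting with logisticfit4ec(), again not from the upstream manual. The left constraint value -1.0 is only an example (it is close to the unconstrained answer for this data set); the right boundary is left free via alglib::fp_nan.

        #include <stdio.h>
        #include "interpolation.h"

        using namespace alglib;

        int main(int argc, char **argv)
        {
            real_1d_array x = "[1,2,3,4,5,6,7,8]";
            real_1d_array y = "[0.06313223,0.44552624,0.61838364,0.71385108,0.77345838,0.81383140,0.84280033,0.86449822]";
            double a, b, c, d;
            lsfitreport rep;

            // for 4PL the left constraint always pins A (model value at X=0);
            // the right boundary (parameter D) is left unconstrained via fp_nan
            logisticfit4ec(x, y, 8, -1.0, fp_nan, a, b, c, d, rep);
            printf("A=%.2f B=%.2f C=%.2f D=%.2f\n", a, b, c, d);
            return 0;
        }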
    -
    - -mannwhitneyutest
     
    /************************************************************************* -Mann-Whitney U-test +This function fits five-parameter logistic (5PL) model to data provided +by user. 5PL model has following form: -This test checks hypotheses about whether X and Y are samples of two -continuous distributions of the same shape and same median or whether -their medians are different. + F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) -The following tests are performed: - * two-tailed test (null hypothesis - the medians are equal) - * left-tailed test (null hypothesis - the median of the first sample - is greater than or equal to the median of the second sample) - * right-tailed test (null hypothesis - the median of the first sample - is less than or equal to the median of the second sample). +Here: + * A, D - unconstrained + * B - unconstrained + * C>0 + * G>0 -Requirements: - * the samples are independent - * X and Y are continuous distributions (or discrete distributions well- - approximating continuous distributions) - * distributions of X and Y have the same shape. The only possible - difference is their position (i.e. the value of the median) - * the number of elements in each sample is not less than 5 - * the scale of measurement should be ordinal, interval or ratio (i.e. - the test could not be applied to nominal variables). +IMPORTANT: unlike in 4PL fitting, output of this function is NOT + constrained in such way that B is guaranteed to be positive. + Furthermore, unlike 4PL, 5PL model is NOT symmetric with + respect to B, so you can NOT transform model to equivalent one, + with B having desired sign (>0 or <0). -The test is non-parametric and doesn't require distributions to be normal. +5PL fitting is implemented as follows: +* we perform small number of restarts from random locations which helps to + solve problem of bad local extrema. Locations are only partially random + - we use input data to determine good initial guess, but we include + controlled amount of randomness. +* we perform Levenberg-Marquardt fitting with very tight constraints on + parameters B and C - it allows us to find good initial guess for the + second stage without risk of running into "flat spot". Parameter G is + fixed at G=1. +* second Levenberg-Marquardt round is performed without excessive + constraints on B and C, but with G still equal to 1. Results from the + previous round are used as initial guess. +* third Levenberg-Marquardt round relaxes constraints on G and tries two + different models - one with B>0 and one with B<0. +* after fitting is done, we compare results with best values found so far, + rewrite "best solution" if needed, and move to next random location. -Input parameters: - X - sample 1. Array whose index goes from 0 to N-1. - N - size of the sample. N>=5 - Y - sample 2. Array whose index goes from 0 to M-1. - M - size of the sample. M>=5 +Overall algorithm is very stable and is not prone to bad local extrema. +Furthermore, it automatically scales when input data have very large or +very small range. -Output parameters: - BothTails - p-value for two-tailed test. - If BothTails is less than the given significance level - the null hypothesis is rejected. - LeftTail - p-value for left-tailed test. - If LeftTail is less than the given significance level, - the null hypothesis is rejected. - RightTail - p-value for right-tailed test. - If RightTail is less than the given significance level - the null hypothesis is rejected. +INPUT PARAMETERS: + X - array[N], stores X-values. 
+ MUST include only non-negative numbers (but may include + zero values). Can be unsorted. + Y - array[N], values to fit. + N - number of points. If N is less than length of X/Y, only + leading N elements are used. -To calculate p-values, special approximation is used. This method lets us -calculate p-values with satisfactory accuracy in interval [0.0001, 1]. -There is no approximation outside the [0.0001, 1] interval. Therefore, if -the significance level outlies this interval, the test returns 0.0001. +OUTPUT PARAMETERS: + A,B,C,D,G- parameters of 5PL model + Rep - fitting report. This structure has many fields, but ONLY + ONES LISTED BELOW ARE SET: + * Rep.IterationsCount - number of iterations performed + * Rep.RMSError - root-mean-square error + * Rep.AvgError - average absolute error + * Rep.AvgRelError - average relative error (calculated for + non-zero Y-values) + * Rep.MaxError - maximum absolute error + * Rep.R2 - coefficient of determination, R-squared. This + coefficient is calculated as R2=1-RSS/TSS (in case + of nonlinear regression there are multiple ways to + define R2, each of them giving different results). -Relative precision of approximation of p-value: +NOTE: for better stability B parameter is restricted by [+-1/1000,+-1000] + range, and G is restricted by [1/10,10] range. It prevents algorithm + from making trial steps deep into the area of bad parameters. -N M Max.err. Rms.err. -5..10 N..10 1.4e-02 6.0e-04 -5..10 N..100 2.2e-02 5.3e-06 -10..15 N..15 1.0e-02 3.2e-04 -10..15 N..100 1.0e-02 2.2e-05 -15..100 N..100 6.1e-03 2.7e-06 +NOTE: after you obtained coefficients, you can evaluate model with + LogisticCalc5() function. -For N,M>100 accuracy checks weren't put into practice, but taking into -account characteristics of asymptotic approximation used, precision should -not be sharply different from the values for interval [5, 100]. +NOTE: if you need better control over fitting process than provided by this + function, you may use LogisticFit45X(). -NOTE: P-value approximation was optimized for 0.0001<=p<=0.2500. Thus, - P's outside of this interval are enforced to these bounds. Say, you - may quite often get P equal to exactly 0.25 or 0.0001. +NOTE: step is automatically scaled according to scale of parameters being + fitted before we compare its length with EpsX. Thus, this function + can be used to fit data with very small or very large values without + changing EpsX. - -- ALGLIB -- - Copyright 09.04.2007 by Bochkanov Sergey + + -- ALGLIB PROJECT -- + Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mannwhitneyutest(
    +void alglib::logisticfit5(
         real_1d_array x,
    -    ae_int_t n,
         real_1d_array y,
    -    ae_int_t m,
    -    double& bothtails,
    -    double& lefttail,
    -    double& righttail);
    +    ae_int_t n,
    +    double& a,
    +    double& b,
    +    double& c,
    +    double& d,
    +    double& g,
    +    lsfitreport& rep,
    +    const xparams _params = alglib::xdefault);
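A minimal sketch of the logisticfit5() call declared above, reusing the data and approximate expected values of the 5PL example removed elsewhere in this hunk; it is not part of the upstream manual.

        #include <stdio.h>
        #include "interpolation.h"

        using namespace alglib;

        int main(int argc, char **argv)
        {
            // data from the removed logisticfit5 example; it expected roughly
            // A=-1.0, B=1.2, C=0.9, D=1.0, G=1.2
            real_1d_array x = "[1,2,3,4,5,6,7,8]";
            real_1d_array y = "[0.1949776139,0.5710060208,0.726002637,0.8060434158,0.8534547965,0.8842071579,0.9054773317,0.9209088299]";
            double a, b, c, d, g;
            lsfitreport rep;

            logisticfit5(x, y, 8, a, b, c, d, g, rep);
            printf("%.1f %.1f %.1f %.1f %.1f\n", a, b, c, d, g);

            // evaluate the fitted 5PL model at x=0.5 (see the LogisticCalc5 note above)
            printf("%.2f\n", logisticcalc5(0.5, a, b, c, d, g));
            return 0;
        }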
    - -
    - -cmatrixdet
    -cmatrixludet
    -rmatrixdet
    -rmatrixludet
    -spdmatrixcholeskydet
    -spdmatrixdet
    matdet_d_1 Determinant calculation, real matrix, short form
    matdet_d_2 Determinant calculation, real matrix, full form
    matdet_d_3 Determinant calculation, complex matrix, short form
    matdet_d_4 Determinant calculation, complex matrix, full form
    matdet_d_5 Determinant calculation, complex matrix with zero imaginary part, short form
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -Calculation of the determinant of a general matrix +This function fits five-parameter logistic (5PL) model to data provided +by user, subject to optional equality constraints on parameters A and D. +5PL model has following form: -Input parameters: - A - matrix, array[0..N-1, 0..N-1] - N - (optional) size of matrix A: - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, automatically determined from matrix size - (A must be square matrix) + F(x|A,B,C,D,G) = D+(A-D)/Power(1+Power(x/C,B),G) -Result: determinant of matrix A. +Here: + * A, D - with optional equality constraints + * B - unconstrained + * C>0 + * G>0 - -- ALGLIB -- - Copyright 2005 by Bochkanov Sergey -*************************************************************************/ -
    alglib::complex alglib::cmatrixdet(complex_2d_array a); -alglib::complex alglib::cmatrixdet(complex_2d_array a, ae_int_t n); +IMPORTANT: unlike in 4PL fitting, output of this function is NOT + constrained in such way that B is guaranteed to be positive. + Furthermore, unlike 4PL, 5PL model is NOT symmetric with + respect to B, so you can NOT transform model to equivalent one, + with B having desired sign (>0 or <0). -
    -

    Examples:   [1]  [2]  [3]  

    - -
    -
    /************************************************************************* -Determinant calculation of the matrix given by its LU decomposition. +5PL fitting is implemented as follows: +* we perform small number of restarts from random locations which helps to + solve problem of bad local extrema. Locations are only partially random + - we use input data to determine good initial guess, but we include + controlled amount of randomness. +* we perform Levenberg-Marquardt fitting with very tight constraints on + parameters B and C - it allows us to find good initial guess for the + second stage without risk of running into "flat spot". Parameter G is + fixed at G=1. +* second Levenberg-Marquardt round is performed without excessive + constraints on B and C, but with G still equal to 1. Results from the + previous round are used as initial guess. +* third Levenberg-Marquardt round relaxes constraints on G and tries two + different models - one with B>0 and one with B<0. +* after fitting is done, we compare results with best values found so far, + rewrite "best solution" if needed, and move to next random location. -Input parameters: - A - LU decomposition of the matrix (output of - RMatrixLU subroutine). - Pivots - table of permutations which were made during - the LU decomposition. - Output of RMatrixLU subroutine. - N - (optional) size of matrix A: - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, automatically determined from matrix size - (A must be square matrix) +Overall algorithm is very stable and is not prone to bad local extrema. +Furthermore, it automatically scales when input data have very large or +very small range. -Result: matrix determinant. +INPUT PARAMETERS: + X - array[N], stores X-values. + MUST include only non-negative numbers (but may include + zero values). Can be unsorted. + Y - array[N], values to fit. + N - number of points. If N is less than length of X/Y, only + leading N elements are used. + CnstrLeft- optional equality constraint for model value at the left + boundary (at X=0). Specify NAN (Not-a-Number) if you do + not need constraint on the model value at X=0 (in C++ you + can pass alglib::fp_nan as parameter, in C# it will be + Double.NaN). + See below, section "EQUALITY CONSTRAINTS" for more + information about constraints. + CnstrRight- optional equality constraint for model value at X=infinity. + Specify NAN (Not-a-Number) if you do not need constraint + on the model value (in C++ you can pass alglib::fp_nan as + parameter, in C# it will be Double.NaN). + See below, section "EQUALITY CONSTRAINTS" for more + information about constraints. - -- ALGLIB -- - Copyright 2005 by Bochkanov Sergey -*************************************************************************/ -
    alglib::complex alglib::cmatrixludet( - complex_2d_array a, - integer_1d_array pivots); -alglib::complex alglib::cmatrixludet( - complex_2d_array a, - integer_1d_array pivots, - ae_int_t n); +OUTPUT PARAMETERS: + A,B,C,D,G- parameters of 5PL model + Rep - fitting report. This structure has many fields, but ONLY + ONES LISTED BELOW ARE SET: + * Rep.IterationsCount - number of iterations performed + * Rep.RMSError - root-mean-square error + * Rep.AvgError - average absolute error + * Rep.AvgRelError - average relative error (calculated for + non-zero Y-values) + * Rep.MaxError - maximum absolute error + * Rep.R2 - coefficient of determination, R-squared. This + coefficient is calculated as R2=1-RSS/TSS (in case + of nonlinear regression there are multiple ways to + define R2, each of them giving different results). -
    - -
    -
    /************************************************************************* -Calculation of the determinant of a general matrix +NOTE: for better stability B parameter is restricted by [+-1/1000,+-1000] + range, and G is restricted by [1/10,10] range. It prevents algorithm + from making trial steps deep into the area of bad parameters. -Input parameters: - A - matrix, array[0..N-1, 0..N-1] - N - (optional) size of matrix A: - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, automatically determined from matrix size - (A must be square matrix) +NOTE: after you obtained coefficients, you can evaluate model with + LogisticCalc5() function. -Result: determinant of matrix A. +NOTE: if you need better control over fitting process than provided by this + function, you may use LogisticFit45X(). - -- ALGLIB -- - Copyright 2005 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::rmatrixdet(real_2d_array a); -double alglib::rmatrixdet(real_2d_array a, ae_int_t n); +NOTE: step is automatically scaled according to scale of parameters being + fitted before we compare its length with EpsX. Thus, this function + can be used to fit data with very small or very large values without + changing EpsX. -
    -

    Examples:   [1]  [2]  

    - -
    -
    /************************************************************************* -Determinant calculation of the matrix given by its LU decomposition. +EQUALITY CONSTRAINTS ON PARAMETERS -Input parameters: - A - LU decomposition of the matrix (output of - RMatrixLU subroutine). - Pivots - table of permutations which were made during - the LU decomposition. - Output of RMatrixLU subroutine. - N - (optional) size of matrix A: - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, automatically determined from matrix size - (A must be square matrix) +5PL solver supports equality constraints on model values at the left +boundary (X=0) and right boundary (X=infinity). These constraints are +completely optional and you can specify both of them, only one - or no +constraints at all. -Result: matrix determinant. +Parameter CnstrLeft contains left constraint (or NAN for unconstrained +fitting), and CnstrRight contains right one. - -- ALGLIB -- - Copyright 2005 by Bochkanov Sergey +Unlike 4PL one, 5PL model is NOT symmetric with respect to change in sign +of B. Thus, negative B's are possible, and left constraint may constrain +parameter A (for positive B's) - or parameter D (for negative B's). +Similarly changes meaning of right constraint. + +You do not have to decide what parameter to constrain - algorithm will +automatically determine correct parameters as fitting progresses. However, +question highlighted above is important when you interpret fitting results. + + + -- ALGLIB PROJECT -- + Copyright 14.02.2014 by Bochkanov Sergey *************************************************************************/ -
    double alglib::rmatrixludet(real_2d_array a, integer_1d_array pivots);
    -double alglib::rmatrixludet(
    -    real_2d_array a,
    -    integer_1d_array pivots,
    -    ae_int_t n);
    +void alglib::logisticfit5ec(
    +    real_1d_array x,
    +    real_1d_array y,
    +    ae_int_t n,
    +    double cnstrleft,
    +    double cnstrright,
    +    double& a,
    +    double& b,
    +    double& c,
    +    double& d,
    +    double& g,
    +    lsfitreport& rep,
    +    const xparams _params = alglib::xdefault);
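A hedged sketch of constrained 5PL fitting with logisticfit5ec(), not from the upstream manual. The left constraint -1.0 is illustrative only; because the fitted B is positive for this data set, the constraint acts on parameter A, as described above.

        #include <stdio.h>
        #include "interpolation.h"

        using namespace alglib;

        int main(int argc, char **argv)
        {
            real_1d_array x = "[1,2,3,4,5,6,7,8]";
            real_1d_array y = "[0.1949776139,0.5710060208,0.726002637,0.8060434158,0.8534547965,0.8842071579,0.9054773317,0.9209088299]";
            double a, b, c, d, g;
            lsfitreport rep;

            // model value at X=0 constrained to -1.0, right boundary left free (fp_nan);
            // for positive B the left constraint acts on A, for negative B it would act on D
            logisticfit5ec(x, y, 8, -1.0, fp_nan, a, b, c, d, g, rep);
            printf("A=%.2f B=%.2f C=%.2f D=%.2f G=%.2f\n", a, b, c, d, g);
            return 0;
        }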
    - +
     
    /************************************************************************* -Determinant calculation of the matrix given by the Cholesky decomposition. +Nonlinear least squares fitting using function values only. -Input parameters: - A - Cholesky decomposition, - output of SMatrixCholesky subroutine. - N - (optional) size of matrix A: - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, automatically determined from matrix size - (A must be square matrix) +Combination of numerical differentiation and secant updates is used to +obtain function Jacobian. -As the determinant is equal to the product of squares of diagonal elements, -it’s not necessary to specify which triangle - lower or upper - the matrix -is stored in. +Nonlinear task min(F(c)) is solved, where -Result: - matrix determinant. + F(c) = (f(c,x[0])-y[0])^2 + ... + (f(c,x[n-1])-y[n-1])^2, - -- ALGLIB -- - Copyright 2005-2008 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::spdmatrixcholeskydet(real_2d_array a); -double alglib::spdmatrixcholeskydet(real_2d_array a, ae_int_t n); + * N is a number of points, + * M is a dimension of a space points belong to, + * K is a dimension of a space of parameters being fitted, + * w is an N-dimensional vector of weight coefficients, + * x is a set of N points, each of them is an M-dimensional vector, + * c is a K-dimensional vector of parameters being fitted -
    - -
    -
    /************************************************************************* -Determinant calculation of the symmetric positive definite matrix. +This subroutine uses only f(c,x[i]). -Input parameters: - A - matrix. Array with elements [0..N-1, 0..N-1]. - N - (optional) size of matrix A: - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, automatically determined from matrix size - (A must be square matrix) - IsUpper - (optional) storage type: - * if True, symmetric matrix A is given by its upper - triangle, and the lower triangle isn’t used/changed by - function - * if False, symmetric matrix A is given by its lower - triangle, and the upper triangle isn’t used/changed by - function - * if not given, both lower and upper triangles must be - filled. +INPUT PARAMETERS: + X - array[0..N-1,0..M-1], points (one row = one point) + Y - array[0..N-1], function values. + C - array[0..K-1], initial approximation to the solution, + N - number of points, N>1 + M - dimension of space + K - number of parameters being fitted + DiffStep- numerical differentiation step; + should not be very small or large; + large = loss of accuracy + small = growth of round-off errors -Result: - determinant of matrix A. - If matrix A is not positive definite, exception is thrown. +OUTPUT PARAMETERS: + State - structure which stores algorithm state -- ALGLIB -- - Copyright 2005-2008 by Bochkanov Sergey + Copyright 18.10.2008 by Bochkanov Sergey *************************************************************************/ -
    double alglib::spdmatrixdet(real_2d_array a);
    -double alglib::spdmatrixdet(real_2d_array a, ae_int_t n, bool isupper);
    +void alglib::lsfitcreatef(
    +    real_2d_array x,
    +    real_1d_array y,
    +    real_1d_array c,
    +    double diffstep,
    +    lsfitstate& state,
    +    const xparams _params = alglib::xdefault);
    +void alglib::lsfitcreatef(
    +    real_2d_array x,
    +    real_1d_array y,
    +    real_1d_array c,
    +    ae_int_t n,
    +    ae_int_t m,
    +    ae_int_t k,
    +    double diffstep,
    +    lsfitstate& state,
    +    const xparams _params = alglib::xdefault);
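A minimal sketch of the derivative-free create/fit/results cycle around lsfitcreatef(), not part of the upstream manual. It reuses the exp(-c0*x^2) model and data from the removed gradient-based examples; the DiffStep value 0.0001 is illustrative, and default stopping conditions are assumed (they can be tuned with lsfitsetcond(), whose argument list has changed between ALGLIB releases).

        #include <stdio.h>
        #include <math.h>
        #include "interpolation.h"

        using namespace alglib;

        // model f(c,x) = exp(-c0*x0^2); only function values are needed by lsfitcreatef()
        void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr)
        {
            func = exp(-c[0]*pow(x[0],2));
        }

        int main(int argc, char **argv)
        {
            // data and initial guess from the removed exponential-fitting examples
            real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]";
            real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]";
            real_1d_array c = "[0.3]";
            double diffstep = 0.0001;          // illustrative numerical differentiation step
            ae_int_t info;
            lsfitstate state;
            lsfitreport rep;

            lsfitcreatef(x, y, c, diffstep, state);
            alglib::lsfitfit(state, function_cx_1_func);
            lsfitresults(state, info, c, rep);
            printf("%s\n", c.tostring(1).c_str()); // the removed examples expected [1.5]
            return 0;
        }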
    - +

    Examples:   [1]  [2]  [3]  

    +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    -
    -using namespace alglib;
    -
    +
    /************************************************************************* +Nonlinear least squares fitting using gradient only, without individual +weights. -int main(int argc, char **argv) -{ - real_2d_array b = "[[1,2],[2,1]]"; - double a; - a = rmatrixdet(b); - printf("%.3f\n", double(a)); // EXPECTED: -3 - return 0; -} +Nonlinear task min(F(c)) is solved, where + F(c) = ((f(c,x[0])-y[0]))^2 + ... + ((f(c,x[n-1])-y[n-1]))^2, -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    +    * N is a number of points,
    +    * M is a dimension of a space points belong to,
    +    * K is a dimension of a space of parameters being fitted,
    +    * x is a set of N points, each of them is an M-dimensional vector,
    +    * c is a K-dimensional vector of parameters being fitted
     
    -using namespace alglib;
    +This subroutine uses only f(c,x[i]) and its gradient.
     
    +INPUT PARAMETERS:
    +    X       -   array[0..N-1,0..M-1], points (one row = one point)
    +    Y       -   array[0..N-1], function values.
    +    C       -   array[0..K-1], initial approximation to the solution,
    +    N       -   number of points, N>1
    +    M       -   dimension of space
    +    K       -   number of parameters being fitted
    +    CheapFG -   boolean flag, which is:
    +                * True  if both function and gradient calculation complexity
    +                        are less than O(M^2).  An improved  algorithm  can
    +                        be  used  which corresponds  to  FGJ  scheme  from
    +                        MINLM unit.
    +                * False otherwise.
    +                        The standard Jacobian-based Levenberg-Marquardt
    +                        algorithm will be used (FJ scheme).
     
    -int main(int argc, char **argv)
    -{
    -    real_2d_array b = "[[5,4],[4,5]]";
    -    double a;
    -    a = rmatrixdet(b, 2);
    -    printf("%.3f\n", double(a)); // EXPECTED: 9
    -    return 0;
    -}
    +OUTPUT PARAMETERS:
    +    State   -   structure which stores algorithm state
     
    +  -- ALGLIB --
    +     Copyright 17.08.2009 by Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::lsfitcreatefg(
    +    real_2d_array x,
    +    real_1d_array y,
    +    real_1d_array c,
    +    bool cheapfg,
    +    lsfitstate& state,
    +    const xparams _params = alglib::xdefault);
    +void alglib::lsfitcreatefg(
    +    real_2d_array x,
    +    real_1d_array y,
    +    real_1d_array c,
    +    ae_int_t n,
    +    ae_int_t m,
    +    ae_int_t k,
    +    bool cheapfg,
    +    lsfitstate& state,
    +    const xparams _params = alglib::xdefault);
    -
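A sketch of gradient-based fitting with lsfitcreatefg(), adapted from the removed example above; it is not part of the upstream manual. The old example also called lsfitsetcond(), whose signature has changed between releases, so default stopping conditions are assumed here.

        #include <stdio.h>
        #include <math.h>
        #include "interpolation.h"

        using namespace alglib;

        // f(c,x) = exp(-c0*x0^2)
        void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr)
        {
            func = exp(-c[0]*pow(x[0],2));
        }
        // same model plus gradient with respect to c (not x)
        void function_cx_1_grad(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
        {
            func = exp(-c[0]*pow(x[0],2));
            grad[0] = -pow(x[0],2)*func;
        }

        int main(int argc, char **argv)
        {
            real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]";
            real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]";
            real_1d_array c = "[0.3]";
            ae_int_t info;
            lsfitstate state;
            lsfitreport rep;

            lsfitcreatefg(x, y, c, true, state);   // cheapfg=true: cheap function/gradient
            alglib::lsfitfit(state, function_cx_1_func, function_cx_1_grad);
            lsfitresults(state, info, c, rep);
            printf("%s\n", c.tostring(1).c_str()); // the removed example expected [1.5]
            return 0;
        }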
    + +

    Examples:   [1]  

    +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    -
    -using namespace alglib;
    +
    /*************************************************************************
    +Nonlinear least squares fitting using gradient/Hessian, without individual
    +weights.
    +Nonlinear task min(F(c)) is solved, where
    -int main(int argc, char **argv)
    -{
    -    complex_2d_array b = "[[1+1i,2],[2,1-1i]]";
    -    alglib::complex a;
    -    a = cmatrixdet(b);
    -    printf("%s\n", a.tostring(3).c_str()); // EXPECTED: -2
    -    return 0;
    -}
    +    F(c) = ((f(c,x[0])-y[0]))^2 + ... + ((f(c,x[n-1])-y[n-1]))^2,
    +    * N is a number of points,
    +    * M is a dimension of a space points belong to,
    +    * K is a dimension of a space of parameters being fitted,
    +    * x is a set of N points, each of them is an M-dimensional vector,
    +    * c is a K-dimensional vector of parameters being fitted
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    +This subroutine uses f(c,x[i]), its gradient and its Hessian.
     
    -using namespace alglib;
    +INPUT PARAMETERS:
    +    X       -   array[0..N-1,0..M-1], points (one row = one point)
    +    Y       -   array[0..N-1], function values.
    +    C       -   array[0..K-1], initial approximation to the solution,
    +    N       -   number of points, N>1
    +    M       -   dimension of space
    +    K       -   number of parameters being fitted
     
    +OUTPUT PARAMETERS:
    +    State   -   structure which stores algorithm state
     
    -int main(int argc, char **argv)
    -{
    -    alglib::complex a;
    -    complex_2d_array b = "[[5i,4],[4i,5]]";
    -    a = cmatrixdet(b, 2);
    -    printf("%s\n", a.tostring(3).c_str()); // EXPECTED: 9i
    -    return 0;
    -}
     
    +  -- ALGLIB --
    +     Copyright 17.08.2009 by Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::lsfitcreatefgh(
    +    real_2d_array x,
    +    real_1d_array y,
    +    real_1d_array c,
    +    lsfitstate& state,
    +    const xparams _params = alglib::xdefault);
    +void alglib::lsfitcreatefgh(
    +    real_2d_array x,
    +    real_1d_array y,
    +    real_1d_array c,
    +    ae_int_t n,
    +    ae_int_t m,
    +    ae_int_t k,
    +    lsfitstate& state,
    +    const xparams _params = alglib::xdefault);
    -
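A sketch of Hessian-based fitting with lsfitcreatefgh(), adapted from the removed example above (default stopping conditions assumed); it is not part of the upstream manual.

        #include <stdio.h>
        #include <math.h>
        #include "interpolation.h"

        using namespace alglib;

        // f(c,x) = exp(-c0*x0^2), with gradient and Hessian taken with respect to c
        void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr)
        {
            func = exp(-c[0]*pow(x[0],2));
        }
        void function_cx_1_grad(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
        {
            func = exp(-c[0]*pow(x[0],2));
            grad[0] = -pow(x[0],2)*func;
        }
        void function_cx_1_hess(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr)
        {
            func = exp(-c[0]*pow(x[0],2));
            grad[0] = -pow(x[0],2)*func;
            hess[0][0] = pow(x[0],4)*func;
        }

        int main(int argc, char **argv)
        {
            real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]";
            real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]";
            real_1d_array c = "[0.3]";
            ae_int_t info;
            lsfitstate state;
            lsfitreport rep;

            lsfitcreatefgh(x, y, c, state);
            alglib::lsfitfit(state, function_cx_1_func, function_cx_1_grad, function_cx_1_hess);
            lsfitresults(state, info, c, rep);
            printf("%s\n", c.tostring(1).c_str()); // the removed example expected [1.5]
            return 0;
        }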
    + +

    Examples:   [1]  

    +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    +
    /************************************************************************* +Weighted nonlinear least squares fitting using function values only. -using namespace alglib; +Combination of numerical differentiation and secant updates is used to +obtain function Jacobian. +Nonlinear task min(F(c)) is solved, where -int main(int argc, char **argv) -{ - alglib::complex a; - complex_2d_array b = "[[9,1],[2,1]]"; - a = cmatrixdet(b); - printf("%s\n", a.tostring(3).c_str()); // EXPECTED: 7 - return 0; -} + F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, + * N is a number of points, + * M is a dimension of a space points belong to, + * K is a dimension of a space of parameters being fitted, + * w is an N-dimensional vector of weight coefficients, + * x is a set of N points, each of them is an M-dimensional vector, + * c is a K-dimensional vector of parameters being fitted -
    - - -
    -
    /************************************************************************* -Generation of random NxN complex matrix with given condition number C and -norm2(A)=1 +This subroutine uses only f(c,x[i]). INPUT PARAMETERS: - N - matrix size - C - condition number (in 2-norm) + X - array[0..N-1,0..M-1], points (one row = one point) + Y - array[0..N-1], function values. + W - weights, array[0..N-1] + C - array[0..K-1], initial approximation to the solution, + N - number of points, N>1 + M - dimension of space + K - number of parameters being fitted + DiffStep- numerical differentiation step; + should not be very small or large; + large = loss of accuracy + small = growth of round-off errors OUTPUT PARAMETERS: - A - random matrix with norm2(A)=1 and cond(A)=C + State - structure which stores algorithm state - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey + -- ALGLIB -- + Copyright 18.10.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixrndcond(ae_int_t n, double c, complex_2d_array& a); - -
    - -
    -
    /************************************************************************* -Generation of a random Haar distributed orthogonal complex matrix - -INPUT PARAMETERS: - N - matrix size, N>=1 - -OUTPUT PARAMETERS: - A - orthogonal NxN matrix, array[0..N-1,0..N-1] - -NOTE: this function uses algorithm described in Stewart, G. W. (1980), - "The Efficient Generation of Random Orthogonal Matrices with an - Application to Condition Estimators". - - Speaking short, to generate an (N+1)x(N+1) orthogonal matrix, it: - * takes an NxN one - * takes uniformly distributed unit vector of dimension N+1. - * constructs a Householder reflection from the vector, then applies - it to the smaller matrix (embedded in the larger size with a 1 at - the bottom right corner). - - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey -*************************************************************************/ -
    void alglib::cmatrixrndorthogonal(ae_int_t n, complex_2d_array& a);
    +void alglib::lsfitcreatewf(
    +    real_2d_array x,
    +    real_1d_array y,
    +    real_1d_array w,
    +    real_1d_array c,
    +    double diffstep,
    +    lsfitstate& state,
    +    const xparams _params = alglib::xdefault);
    +void alglib::lsfitcreatewf(
    +    real_2d_array x,
    +    real_1d_array y,
    +    real_1d_array w,
    +    real_1d_array c,
    +    ae_int_t n,
    +    ae_int_t m,
    +    ae_int_t k,
    +    double diffstep,
    +    lsfitstate& state,
    +    const xparams _params = alglib::xdefault);
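A sketch of weighted, derivative-free fitting with lsfitcreatewf(); not from the upstream manual. Data, model and unit weights are taken from the removed examples; the DiffStep value is illustrative and default stopping conditions are assumed.

        #include <stdio.h>
        #include <math.h>
        #include "interpolation.h"

        using namespace alglib;

        void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr)
        {
            func = exp(-c[0]*pow(x[0],2));       // f(c,x) = exp(-c0*x0^2)
        }

        int main(int argc, char **argv)
        {
            real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]";
            real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]";
            real_1d_array w = "[1,1,1,1,1,1,1,1,1,1,1]";   // unit weights; change them to see the effect
            real_1d_array c = "[0.3]";
            double diffstep = 0.0001;
            ae_int_t info;
            lsfitstate state;
            lsfitreport rep;

            lsfitcreatewf(x, y, w, c, diffstep, state);
            alglib::lsfitfit(state, function_cx_1_func);
            lsfitresults(state, info, c, rep);
            printf("%s\n", c.tostring(1).c_str()); // the removed examples expected [1.5]
            return 0;
        }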
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Multiplication of MxN complex matrix by MxM random Haar distributed -complex orthogonal matrix +Weighted nonlinear least squares fitting using gradient only. -INPUT PARAMETERS: - A - matrix, array[0..M-1, 0..N-1] - M, N- matrix size +Nonlinear task min(F(c)) is solved, where -OUTPUT PARAMETERS: - A - Q*A, where Q is random MxM orthogonal matrix + F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey -*************************************************************************/ -
    void alglib::cmatrixrndorthogonalfromtheleft( - complex_2d_array& a, - ae_int_t m, - ae_int_t n); + * N is a number of points, + * M is a dimension of a space points belong to, + * K is a dimension of a space of parameters being fitted, + * w is an N-dimensional vector of weight coefficients, + * x is a set of N points, each of them is an M-dimensional vector, + * c is a K-dimensional vector of parameters being fitted -
    - -
    -
    /*************************************************************************
-Multiplication of MxN complex matrix by NxN random Haar distributed
-complex orthogonal matrix
+This subroutine uses only f(c,x[i]) and its gradient.
 INPUT PARAMETERS:
- A - matrix, array[0..M-1, 0..N-1]
- M, N- matrix size
+ X - array[0..N-1,0..M-1], points (one row = one point)
+ Y - array[0..N-1], function values.
+ W - weights, array[0..N-1]
+ C - array[0..K-1], initial approximation to the solution,
+ N - number of points, N>1
+ M - dimension of space
+ K - number of parameters being fitted
+ CheapFG - boolean flag, which is:
+ * True if both function and gradient calculation complexity
+ are less than O(M^2). An improved algorithm can
+ be used which corresponds to FGJ scheme from
+ MINLM unit.
+ * False otherwise.
+ Standard Jacobian-based Levenberg-Marquardt algorithm
+ will be used (FJ scheme).
 OUTPUT PARAMETERS:
- A - A*Q, where Q is random NxN orthogonal matrix
+ State - structure which stores algorithm state
- -- ALGLIB routine --
- 04.12.2009 - Bochkanov Sergey
+See also:
+ LSFitResults
+ LSFitCreateFG (fitting without weights)
+ LSFitCreateWFGH (fitting using Hessian)
+ LSFitCreateFGH (fitting using Hessian, without weights)
+
+ -- ALGLIB --
+ Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::cmatrixrndorthogonalfromtheright( - complex_2d_array& a, +
    void alglib::lsfitcreatewfg( + real_2d_array x, + real_1d_array y, + real_1d_array w, + real_1d_array c, + bool cheapfg, + lsfitstate& state, + const xparams _params = alglib::xdefault); +void alglib::lsfitcreatewfg( + real_2d_array x, + real_1d_array y, + real_1d_array w, + real_1d_array c, + ae_int_t n, ae_int_t m, - ae_int_t n); + ae_int_t k, + bool cheapfg, + lsfitstate& state, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
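    Continuing the sketch above, the gradient-based variant additionally takes a callback returning df/dc; the callback name and the CheapFG choice are illustrative assumptions.

    /* gradient of the illustrative model f(c,x) = exp(-c0*x0^2) with respect to c */
    void f_grad(const real_1d_array &c, const real_1d_array &x,
                double &func, real_1d_array &grad, void *ptr)
    {
        func    = exp(-c[0]*x[0]*x[0]);
        grad[0] = -x[0]*x[0]*exp(-c[0]*x[0]*x[0]);   /* df/dc0 */
    }

    /* with x, y, w, c, state, info, rep declared as in the previous sketch:
       lsfitcreatewfg(x, y, w, c, true, state);      // CheapFG=true selects the FGJ scheme
       lsfitfit(state, f_value, f_grad);
       lsfitresults(state, info, c, rep);                                                  */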

    +
     
    /************************************************************************* -Generation of random NxN Hermitian matrix with given condition number and -norm2(A)=1 +Weighted nonlinear least squares fitting using gradient/Hessian. -INPUT PARAMETERS: - N - matrix size - C - condition number (in 2-norm) +Nonlinear task min(F(c)) is solved, where -OUTPUT PARAMETERS: - A - random matrix with norm2(A)=1 and cond(A)=C + F(c) = (w[0]*(f(c,x[0])-y[0]))^2 + ... + (w[n-1]*(f(c,x[n-1])-y[n-1]))^2, - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey -*************************************************************************/ -
    void alglib::hmatrixrndcond(ae_int_t n, double c, complex_2d_array& a); + * N is a number of points, + * M is a dimension of a space points belong to, + * K is a dimension of a space of parameters being fitted, + * w is an N-dimensional vector of weight coefficients, + * x is a set of N points, each of them is an M-dimensional vector, + * c is a K-dimensional vector of parameters being fitted -
    - -
    -
    /************************************************************************* -Hermitian multiplication of NxN matrix by random Haar distributed -complex orthogonal matrix +This subroutine uses f(c,x[i]), its gradient and its Hessian. INPUT PARAMETERS: - A - matrix, array[0..N-1, 0..N-1] - N - matrix size + X - array[0..N-1,0..M-1], points (one row = one point) + Y - array[0..N-1], function values. + W - weights, array[0..N-1] + C - array[0..K-1], initial approximation to the solution, + N - number of points, N>1 + M - dimension of space + K - number of parameters being fitted OUTPUT PARAMETERS: - A - Q^H*A*Q, where Q is random NxN orthogonal matrix + State - structure which stores algorithm state - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey + -- ALGLIB -- + Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hmatrixrndmultiply(complex_2d_array& a, ae_int_t n); +
    void alglib::lsfitcreatewfgh( + real_2d_array x, + real_1d_array y, + real_1d_array w, + real_1d_array c, + lsfitstate& state, + const xparams _params = alglib::xdefault); +void alglib::lsfitcreatewfgh( + real_2d_array x, + real_1d_array y, + real_1d_array w, + real_1d_array c, + ae_int_t n, + ae_int_t m, + ae_int_t k, + lsfitstate& state, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
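    A sketch of the gradient/Hessian variant for the same illustrative model; here the Hessian with respect to c is a 1x1 matrix.

    /* second derivative of f(c,x) = exp(-c0*x0^2) with respect to c0 */
    void f_hess(const real_1d_array &c, const real_1d_array &x, double &func,
                real_1d_array &grad, real_2d_array &hess, void *ptr)
    {
        double e   = exp(-c[0]*x[0]*x[0]);
        func       = e;
        grad[0]    = -x[0]*x[0]*e;
        hess[0][0] = x[0]*x[0]*x[0]*x[0]*e;
    }

    /* lsfitcreatewfgh(x, y, w, c, state);
       lsfitfit(state, f_value, f_grad, f_hess);
       lsfitresults(state, info, c, rep);        */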

    +
     
    /*************************************************************************
-Generation of random NxN Hermitian positive definite matrix with given
-condition number and norm2(A)=1
+This family of functions is used to launch iterations of the nonlinear fitter.
-INPUT PARAMETERS:
- N - matrix size
- C - condition number (in 2-norm)
+These functions accept the following parameters:
+ state - algorithm state
+ func - callback which calculates function (or merit function)
+ value func at given point x
+ grad - callback which calculates function (or merit function)
+ value func and gradient grad at given point x
+ hess - callback which calculates function (or merit function)
+ value func, gradient grad and Hessian hess at given point x
+ rep - optional callback which is called after each iteration
+ can be NULL
+ ptr - optional pointer which is passed to func/grad/hess/jac/rep
+ can be NULL
-OUTPUT PARAMETERS:
- A - random HPD matrix with norm2(A)=1 and cond(A)=C
+NOTES:
- -- ALGLIB routine --
- 04.12.2009 - Bochkanov Sergey
-*************************************************************************/
-
    void alglib::hpdmatrixrndcond(ae_int_t n, double c, complex_2d_array& a); +1. this algorithm is somewhat unusual because it works with parameterized + function f(C,X), where X is a function argument (we have many points + which are characterized by different argument values), and C is a + parameter to fit. -
    - -
    -
    /************************************************************************* -Generation of random NxN matrix with given condition number and norm2(A)=1 + For example, if we want to do linear fit by f(c0,c1,x) = c0*x+c1, then + x will be argument, and {c0,c1} will be parameters. -INPUT PARAMETERS: - N - matrix size - C - condition number (in 2-norm) + It is important to understand that this algorithm finds minimum in the + space of function PARAMETERS (not arguments), so it needs derivatives + of f() with respect to C, not X. -OUTPUT PARAMETERS: - A - random matrix with norm2(A)=1 and cond(A)=C + In the example above it will need f=c0*x+c1 and {df/dc0,df/dc1} = {x,1} + instead of {df/dx} = {c0}. - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey -*************************************************************************/ -
    void alglib::rmatrixrndcond(ae_int_t n, double c, real_2d_array& a);
+2. Callback functions accept C as the first parameter and X as the second one.
+
+3. If state was created with LSFitCreateFG(), the algorithm needs just the
+   function and its gradient, but if state was created with
+   LSFitCreateFGH(), it will need the function, its gradient and its Hessian.
+
+   As noted above, there are several versions of this function, which
+   accept different sets of callbacks.
+
+   This flexibility opens the way to subtle errors - you may create a state
+   with LSFitCreateFGH() (optimization using the Hessian), but call a version
+   which does not accept a Hessian callback. When the algorithm requests the
+   Hessian, there will be no callback to call, and an exception will be thrown.
+
+   Be careful to avoid such errors, because there is no way to find them at
+   compile time - they show up only at runtime.
+
+ -- ALGLIB --
+ Copyright 17.08.2009 by Bochkanov Sergey
+*************************************************************************/
+
    void lsfitfit(lsfitstate &state, + void (*func)(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr), + void (*rep)(const real_1d_array &c, double func, void *ptr) = NULL, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault); +void lsfitfit(lsfitstate &state, + void (*func)(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr), + void (*grad)(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), + void (*rep)(const real_1d_array &c, double func, void *ptr) = NULL, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault); +void lsfitfit(lsfitstate &state, + void (*func)(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr), + void (*grad)(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), + void (*hess)(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr), + void (*rep)(const real_1d_array &c, double func, void *ptr) = NULL, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault);
    - +

    Examples:   [1]  [2]  [3]  [4]  [5]  
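    For illustration, an optional per-iteration report callback matching the rep signature above; the callback name and the printout are assumptions, and it is simply passed after the function callback.

    #include <stdio.h>

    /* called by the fitter after each iteration with the current parameters
       and the current value of the merit function                           */
    void fit_progress(const real_1d_array &c, double func, void *ptr)
    {
        printf("c0 = %.6f   F = %.6e\n", c[0], func);
    }

    /* lsfitfit(state, f_value, fit_progress, NULL);   // state created with lsfitcreatewf() */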

    +
     
    /************************************************************************* -Generation of a random uniformly distributed (Haar) orthogonal matrix +Linear least squares fitting. + +QR decomposition is used to reduce task to MxM, then triangular solver or +SVD-based solver is used depending on condition number of the system. It +allows to maximize speed and retain decent accuracy. + +IMPORTANT: if you want to perform polynomial fitting, it may be more + convenient to use PolynomialFit() function. This function gives + best results on polynomial problems and solves numerical + stability issues which arise when you fit high-degree + polynomials to your data. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - N - matrix size, N>=1 + Y - array[0..N-1] Function values in N points. + FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. + FMatrix[I, J] - value of J-th basis function in I-th point. + N - number of points used. N>=1. + M - number of basis functions, M>=1. OUTPUT PARAMETERS: - A - orthogonal NxN matrix, array[0..N-1,0..N-1] + Info - error code: + * -4 internal SVD decomposition subroutine failed (very + rare and for degenerate systems only) + * 1 task is solved + C - decomposition coefficients, array[0..M-1] + Rep - fitting report. Following fields are set: + * Rep.TaskRCond reciprocal of condition number + * R2 non-adjusted coefficient of determination + (non-weighted) + * RMSError rms error on the (X,Y). + * AvgError average error on the (X,Y). + * AvgRelError average relative error on the non-zero Y + * MaxError maximum error + NON-WEIGHTED ERRORS ARE CALCULATED -NOTE: this function uses algorithm described in Stewart, G. W. (1980), - "The Efficient Generation of Random Orthogonal Matrices with an - Application to Condition Estimators". +ERRORS IN PARAMETERS - Speaking short, to generate an (N+1)x(N+1) orthogonal matrix, it: - * takes an NxN one - * takes uniformly distributed unit vector of dimension N+1. - * constructs a Householder reflection from the vector, then applies - it to the smaller matrix (embedded in the larger size with a 1 at - the bottom right corner). +This solver also calculates different kinds of errors in parameters and +fills corresponding fields of report: +* Rep.CovPar covariance matrix for parameters, array[K,K]. +* Rep.ErrPar errors in parameters, array[K], + errpar = sqrt(diag(CovPar)) +* Rep.ErrCurve vector of fit errors - standard deviations of empirical + best-fit curve from "ideal" best-fit curve built with + infinite number of samples, array[N]. + errcurve = sqrt(diag(F*CovPar*F')), + where F is functions matrix. +* Rep.Noise vector of per-point estimates of noise, array[N] - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey -*************************************************************************/ -
    void alglib::rmatrixrndorthogonal(ae_int_t n, real_2d_array& a); +NOTE: noise in the data is estimated as follows: + * for fitting without user-supplied weights all points are + assumed to have same level of noise, which is estimated from + the data + * for fitting with user-supplied weights we assume that noise + level in I-th point is inversely proportional to Ith weight. + Coefficient of proportionality is estimated from the data. -
    - -
    -
    /************************************************************************* -Multiplication of MxN matrix by MxM random Haar distributed orthogonal matrix +NOTE: we apply small amount of regularization when we invert squared + Jacobian and calculate covariance matrix. It guarantees that + algorithm won't divide by zero during inversion, but skews + error estimates a bit (fractional error is about 10^-9). -INPUT PARAMETERS: - A - matrix, array[0..M-1, 0..N-1] - M, N- matrix size + However, we believe that this difference is insignificant for + all practical purposes except for the situation when you want + to compare ALGLIB results with "reference" implementation up + to the last significant digit. -OUTPUT PARAMETERS: - A - Q*A, where Q is random MxM orthogonal matrix +NOTE: covariance matrix is estimated using correction for degrees + of freedom (covariances are divided by N-M instead of dividing + by N). - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey + -- ALGLIB -- + Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixrndorthogonalfromtheleft( - real_2d_array& a, +
    void alglib::lsfitlinear( + real_1d_array y, + real_2d_array fmatrix, + ae_int_t& info, + real_1d_array& c, + lsfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::lsfitlinear( + real_1d_array y, + real_2d_array fmatrix, + ae_int_t n, ae_int_t m, - ae_int_t n); + ae_int_t& info, + real_1d_array& c, + lsfitreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
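    A minimal sketch of unweighted linear fitting with the basis {1, x}; the data values are illustrative placeholders.

    #include "interpolation.h"      /* assumed ALGLIB header */
    using namespace alglib;

    int main()
    {
        /* fit y ~ c0*1 + c1*x on four illustrative points;
           fmatrix[i][j] = value of j-th basis function at i-th point */
        real_2d_array fmatrix = "[[1,0],[1,1],[1,2],[1,3]]";
        real_1d_array y       = "[0.9, 2.1, 2.9, 4.2]";
        ae_int_t info;
        real_1d_array c;
        lsfitreport rep;

        lsfitlinear(y, fmatrix, info, c, rep);   /* on success info=1 and c = [c0, c1] */
        return 0;
    }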

    +
     
    /*************************************************************************
-Multiplication of MxN matrix by NxN random Haar distributed orthogonal matrix
+Constrained linear least squares fitting.
+
+This is a variation of LSFitLinear() which searches for min|A*x-b| given
+that K additional constraints C*x=bc are satisfied. It reduces the original
+task to a modified one: min|B*y-d| WITHOUT constraints, then LSFitLinear()
+is called.
+
+IMPORTANT: if you want to perform polynomial fitting, it may be more
+ convenient to use PolynomialFit() function. This function gives
+ best results on polynomial problems and solves numerical
+ stability issues which arise when you fit high-degree
+ polynomials to your data.
+
+ ! COMMERCIAL EDITION OF ALGLIB:
+ !
+ ! Commercial Edition of ALGLIB includes following important improvements
+ ! of this function:
+ ! * high-performance native backend with same C# interface (C# version)
+ ! * multithreading support (C++ and C# versions)
+ ! * hardware vendor (Intel) implementations of linear algebra primitives
+ ! (C++ and C# versions, x86/x64 platform)
+ !
+ ! We recommend you to read 'Working with commercial version' section of
+ ! ALGLIB Reference Manual in order to find out how to use performance-
+ ! related features provided by commercial edition of ALGLIB.
 INPUT PARAMETERS:
- A - matrix, array[0..M-1, 0..N-1]
- M, N- matrix size
+ Y - array[0..N-1] Function values in N points.
+ FMatrix - a table of basis functions values, array[0..N-1, 0..M-1].
+ FMatrix[I,J] - value of J-th basis function in I-th point.
+ CMatrix - a table of constraints, array[0..K-1,0..M].
+ I-th row of CMatrix corresponds to I-th linear constraint:
+ CMatrix[I,0]*C[0] + ... + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M]
+ N - number of points used. N>=1.
+ M - number of basis functions, M>=1.
+ K - number of constraints, 0 <= K < M
+ K=0 corresponds to absence of constraints.
 OUTPUT PARAMETERS:
- A - A*Q, where Q is random NxN orthogonal matrix
+ Info - error code:
+ * -4 internal SVD decomposition subroutine failed (very
+ rare and for degenerate systems only)
+ * -3 either too many constraints (M or more),
+ degenerate constraints (some constraints are
+ repeated twice) or inconsistent constraints were
+ specified.
+ * 1 task is solved
+ C - decomposition coefficients, array[0..M-1]
+ Rep - fitting report. Following fields are set:
+ * R2 non-adjusted coefficient of determination
+ (non-weighted)
+ * RMSError rms error on the (X,Y).
+ * AvgError average error on the (X,Y).
+ * AvgRelError average relative error on the non-zero Y
+ * MaxError maximum error
+ NON-WEIGHTED ERRORS ARE CALCULATED

- -- ALGLIB routine --
- 04.12.2009 - Bochkanov Sergey
-*************************************************************************/
-
    void alglib::rmatrixrndorthogonalfromtheright(
- real_2d_array& a,
- ae_int_t m,
- ae_int_t n);
+IMPORTANT:
+ this subroutine doesn't calculate the task's condition number for K<>0.
    - -
    -
    /************************************************************************* -Generation of random NxN symmetric matrix with given condition number and -norm2(A)=1 +ERRORS IN PARAMETERS -INPUT PARAMETERS: - N - matrix size - C - condition number (in 2-norm) +This solver also calculates different kinds of errors in parameters and +fills corresponding fields of report: +* Rep.CovPar covariance matrix for parameters, array[K,K]. +* Rep.ErrPar errors in parameters, array[K], + errpar = sqrt(diag(CovPar)) +* Rep.ErrCurve vector of fit errors - standard deviations of empirical + best-fit curve from "ideal" best-fit curve built with + infinite number of samples, array[N]. + errcurve = sqrt(diag(F*CovPar*F')), + where F is functions matrix. +* Rep.Noise vector of per-point estimates of noise, array[N] -OUTPUT PARAMETERS: - A - random matrix with norm2(A)=1 and cond(A)=C +IMPORTANT: errors in parameters are calculated without taking into + account boundary/linear constraints! Presence of constraints + changes distribution of errors, but there is no easy way to + account for constraints when you calculate covariance matrix. - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey -*************************************************************************/ -
    void alglib::smatrixrndcond(ae_int_t n, double c, real_2d_array& a); +NOTE: noise in the data is estimated as follows: + * for fitting without user-supplied weights all points are + assumed to have same level of noise, which is estimated from + the data + * for fitting with user-supplied weights we assume that noise + level in I-th point is inversely proportional to Ith weight. + Coefficient of proportionality is estimated from the data. -
    - -
    -
    /************************************************************************* -Symmetric multiplication of NxN matrix by random Haar distributed -orthogonal matrix +NOTE: we apply small amount of regularization when we invert squared + Jacobian and calculate covariance matrix. It guarantees that + algorithm won't divide by zero during inversion, but skews + error estimates a bit (fractional error is about 10^-9). -INPUT PARAMETERS: - A - matrix, array[0..N-1, 0..N-1] - N - matrix size + However, we believe that this difference is insignificant for + all practical purposes except for the situation when you want + to compare ALGLIB results with "reference" implementation up + to the last significant digit. -OUTPUT PARAMETERS: - A - Q'*A*Q, where Q is random NxN orthogonal matrix +NOTE: covariance matrix is estimated using correction for degrees + of freedom (covariances are divided by N-M instead of dividing + by N). - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey + -- ALGLIB -- + Copyright 07.09.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::smatrixrndmultiply(real_2d_array& a, ae_int_t n); +
    void alglib::lsfitlinearc( + real_1d_array y, + real_2d_array fmatrix, + real_2d_array cmatrix, + ae_int_t& info, + real_1d_array& c, + lsfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::lsfitlinearc( + real_1d_array y, + real_2d_array fmatrix, + real_2d_array cmatrix, + ae_int_t n, + ae_int_t m, + ae_int_t k, + ae_int_t& info, + real_1d_array& c, + lsfitreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
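    Continuing the linear sketch above, one illustrative constraint row [1, 0 | 1] forces the fitted line through (0,1), i.e. c0 = 1.

    /* same y and fmatrix as in the lsfitlinear sketch; the last column of
       cmatrix is the right-hand side of the constraint 1*c0 + 0*c1 = 1     */
    real_2d_array cmatrix = "[[1,0,1]]";
    ae_int_t info;
    real_1d_array c;
    lsfitreport rep;

    lsfitlinearc(y, fmatrix, cmatrix, info, c, rep);   /* c[0] is forced to 1 exactly */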

    +
     
    /************************************************************************* -Generation of random NxN symmetric positive definite matrix with given -condition number and norm2(A)=1 - -INPUT PARAMETERS: - N - matrix size - C - condition number (in 2-norm) - -OUTPUT PARAMETERS: - A - random SPD matrix with norm2(A)=1 and cond(A)=C - - -- ALGLIB routine -- - 04.12.2009 - Bochkanov Sergey -*************************************************************************/ -
    void alglib::spdmatrixrndcond(ae_int_t n, double c, real_2d_array& a); - -
    - - - -
    -
    /************************************************************************* -Matrix inverse report: -* R1 reciprocal of condition number in 1-norm -* RInf reciprocal of condition number in inf-norm -*************************************************************************/ -
    class matinvreport -{ - double r1; - double rinf; -}; +Weighted linear least squares fitting. -
    - -
    -
    /************************************************************************* -Inversion of a general matrix. +QR decomposition is used to reduce task to MxM, then triangular solver or +SVD-based solver is used depending on condition number of the system. It +allows to maximize speed and retain decent accuracy. -COMMERCIAL EDITION OF ALGLIB: +IMPORTANT: if you want to perform polynomial fitting, it may be more + convenient to use PolynomialFit() function. This function gives + best results on polynomial problems and solves numerical + stability issues which arise when you fit high-degree + polynomials to your data. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that matrix inversion is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. -Input parameters: - A - matrix - N - size of matrix A (optional) : - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, size is automatically determined from - matrix size (A must be square matrix) +INPUT PARAMETERS: + Y - array[0..N-1] Function values in N points. + W - array[0..N-1] Weights corresponding to function values. + Each summand in square sum of approximation deviations + from given values is multiplied by the square of + corresponding weight. + FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. + FMatrix[I, J] - value of J-th basis function in I-th point. + N - number of points used. N>=1. + M - number of basis functions, M>=1. 
-Output parameters: - Info - return code, same as in RMatrixLUInverse - Rep - solver report, same as in RMatrixLUInverse - A - inverse of matrix A, same as in RMatrixLUInverse +OUTPUT PARAMETERS: + Info - error code: + * -4 internal SVD decomposition subroutine failed (very + rare and for degenerate systems only) + * -1 incorrect N/M were specified + * 1 task is solved + C - decomposition coefficients, array[0..M-1] + Rep - fitting report. Following fields are set: + * Rep.TaskRCond reciprocal of condition number + * R2 non-adjusted coefficient of determination + (non-weighted) + * RMSError rms error on the (X,Y). + * AvgError average error on the (X,Y). + * AvgRelError average relative error on the non-zero Y + * MaxError maximum error + NON-WEIGHTED ERRORS ARE CALCULATED + +ERRORS IN PARAMETERS + +This solver also calculates different kinds of errors in parameters and +fills corresponding fields of report: +* Rep.CovPar covariance matrix for parameters, array[K,K]. +* Rep.ErrPar errors in parameters, array[K], + errpar = sqrt(diag(CovPar)) +* Rep.ErrCurve vector of fit errors - standard deviations of empirical + best-fit curve from "ideal" best-fit curve built with + infinite number of samples, array[N]. + errcurve = sqrt(diag(F*CovPar*F')), + where F is functions matrix. +* Rep.Noise vector of per-point estimates of noise, array[N] + +NOTE: noise in the data is estimated as follows: + * for fitting without user-supplied weights all points are + assumed to have same level of noise, which is estimated from + the data + * for fitting with user-supplied weights we assume that noise + level in I-th point is inversely proportional to Ith weight. + Coefficient of proportionality is estimated from the data. + +NOTE: we apply small amount of regularization when we invert squared + Jacobian and calculate covariance matrix. It guarantees that + algorithm won't divide by zero during inversion, but skews + error estimates a bit (fractional error is about 10^-9). + + However, we believe that this difference is insignificant for + all practical purposes except for the situation when you want + to compare ALGLIB results with "reference" implementation up + to the last significant digit. + +NOTE: covariance matrix is estimated using correction for degrees + of freedom (covariances are divided by N-M instead of dividing + by N). -- ALGLIB -- - Copyright 2005 by Bochkanov Sergey + Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixinverse( - complex_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::cmatrixinverse( - complex_2d_array& a, - ae_int_t n, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_cmatrixinverse( - complex_2d_array& a, +
    void alglib::lsfitlinearw( + real_1d_array y, + real_1d_array w, + real_2d_array fmatrix, ae_int_t& info, - matinvreport& rep); -void alglib::smp_cmatrixinverse( - complex_2d_array& a, + real_1d_array& c, + lsfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::lsfitlinearw( + real_1d_array y, + real_1d_array w, + real_2d_array fmatrix, ae_int_t n, + ae_int_t m, ae_int_t& info, - matinvreport& rep); + real_1d_array& c, + lsfitreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  
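    A short sketch of the weighted call; the weights are illustrative and simply down-weight the last point of the lsfitlinear sketch.

    real_1d_array w = "[1, 1, 1, 0.1]";   /* per-point weights (assumed values) */
    ae_int_t info;
    real_1d_array c;
    lsfitreport rep;

    lsfitlinearw(y, w, fmatrix, info, c, rep);   /* y and fmatrix as in the lsfitlinear sketch */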

    +
     
    /************************************************************************* -Inversion of a matrix given by its LU decomposition. +Weighted constained linear least squares fitting. -COMMERCIAL EDITION OF ALGLIB: +This is variation of LSFitLinearW(), which searchs for min|A*x=b| given +that K additional constaints C*x=bc are satisfied. It reduces original +task to modified one: min|B*y-d| WITHOUT constraints, then LSFitLinearW() +is called. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. +IMPORTANT: if you want to perform polynomial fitting, it may be more + convenient to use PolynomialFit() function. This function gives + best results on polynomial problems and solves numerical + stability issues which arise when you fit high-degree + polynomials to your data. + + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that matrix inversion is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - A - LU decomposition of the matrix - (output of CMatrixLU subroutine). - Pivots - table of permutations - (the output of CMatrixLU subroutine). - N - size of matrix A (optional) : - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, size is automatically determined from - matrix size (A must be square matrix) + Y - array[0..N-1] Function values in N points. + W - array[0..N-1] Weights corresponding to function values. + Each summand in square sum of approximation deviations + from given values is multiplied by the square of + corresponding weight. + FMatrix - a table of basis functions values, array[0..N-1, 0..M-1]. 
+ FMatrix[I,J] - value of J-th basis function in I-th point.
+ CMatrix - a table of constraints, array[0..K-1,0..M].
+ I-th row of CMatrix corresponds to I-th linear constraint:
+ CMatrix[I,0]*C[0] + ... + CMatrix[I,M-1]*C[M-1] = CMatrix[I,M]
+ N - number of points used. N>=1.
+ M - number of basis functions, M>=1.
+ K - number of constraints, 0 <= K < M
+ K=0 corresponds to absence of constraints.
 OUTPUT PARAMETERS:
- Info - return code, same as in RMatrixLUInverse
- Rep - solver report, same as in RMatrixLUInverse
- A - inverse of matrix A, same as in RMatrixLUInverse
-
- -- ALGLIB routine --
- 05.02.2010 - Bochkanov Sergey
-*************************************************************************/
-
    void alglib::cmatrixluinverse(
- complex_2d_array& a,
- integer_1d_array pivots,
- ae_int_t& info,
- matinvreport& rep);
-void alglib::cmatrixluinverse(
- complex_2d_array& a,
- integer_1d_array pivots,
- ae_int_t n,
- ae_int_t& info,
- matinvreport& rep);
-void alglib::smp_cmatrixluinverse(
- complex_2d_array& a,
- integer_1d_array pivots,
- ae_int_t& info,
- matinvreport& rep);
-void alglib::smp_cmatrixluinverse(
- complex_2d_array& a,
- integer_1d_array pivots,
- ae_int_t n,
- ae_int_t& info,
- matinvreport& rep);
+ Info - error code:
+ * -4 internal SVD decomposition subroutine failed (very
+ rare and for degenerate systems only)
+ * -3 either too many constraints (M or more),
+ degenerate constraints (some constraints are
+ repeated twice) or inconsistent constraints were
+ specified.
+ * 1 task is solved
+ C - decomposition coefficients, array[0..M-1]
+ Rep - fitting report. Following fields are set:
+ * R2 non-adjusted coefficient of determination
+ (non-weighted)
+ * RMSError rms error on the (X,Y).
+ * AvgError average error on the (X,Y).
+ * AvgRelError average relative error on the non-zero Y
+ * MaxError maximum error
+ NON-WEIGHTED ERRORS ARE CALCULATED
    - -
    -
    /************************************************************************* -Triangular matrix inverse (complex) +IMPORTANT: + this subroitine doesn't calculate task's condition number for K<>0. -The subroutine inverts the following types of matrices: - * upper triangular - * upper triangular with unit diagonal - * lower triangular - * lower triangular with unit diagonal +ERRORS IN PARAMETERS -In case of an upper (lower) triangular matrix, the inverse matrix will -also be upper (lower) triangular, and after the end of the algorithm, the -inverse matrix replaces the source matrix. The elements below (above) the -main diagonal are not changed by the algorithm. +This solver also calculates different kinds of errors in parameters and +fills corresponding fields of report: +* Rep.CovPar covariance matrix for parameters, array[K,K]. +* Rep.ErrPar errors in parameters, array[K], + errpar = sqrt(diag(CovPar)) +* Rep.ErrCurve vector of fit errors - standard deviations of empirical + best-fit curve from "ideal" best-fit curve built with + infinite number of samples, array[N]. + errcurve = sqrt(diag(F*CovPar*F')), + where F is functions matrix. +* Rep.Noise vector of per-point estimates of noise, array[N] -If the matrix has a unit diagonal, the inverse matrix also has a unit -diagonal, and the diagonal elements are not passed to the algorithm. +IMPORTANT: errors in parameters are calculated without taking into + account boundary/linear constraints! Presence of constraints + changes distribution of errors, but there is no easy way to + account for constraints when you calculate covariance matrix. -COMMERCIAL EDITION OF ALGLIB: +NOTE: noise in the data is estimated as follows: + * for fitting without user-supplied weights all points are + assumed to have same level of noise, which is estimated from + the data + * for fitting with user-supplied weights we assume that noise + level in I-th point is inversely proportional to Ith weight. + Coefficient of proportionality is estimated from the data. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that triangular inverse is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! 
We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +NOTE: we apply small amount of regularization when we invert squared + Jacobian and calculate covariance matrix. It guarantees that + algorithm won't divide by zero during inversion, but skews + error estimates a bit (fractional error is about 10^-9). -Input parameters: - A - matrix, array[0..N-1, 0..N-1]. - N - size of matrix A (optional) : - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, size is automatically determined from - matrix size (A must be square matrix) - IsUpper - True, if the matrix is upper triangular. - IsUnit - diagonal type (optional): - * if True, matrix has unit diagonal (a[i,i] are NOT used) - * if False, matrix diagonal is arbitrary - * if not given, False is assumed + However, we believe that this difference is insignificant for + all practical purposes except for the situation when you want + to compare ALGLIB results with "reference" implementation up + to the last significant digit. -Output parameters: - Info - same as for RMatrixLUInverse - Rep - same as for RMatrixLUInverse - A - same as for RMatrixLUInverse. +NOTE: covariance matrix is estimated using correction for degrees + of freedom (covariances are divided by N-M instead of dividing + by N). -- ALGLIB -- - Copyright 05.02.2010 by Bochkanov Sergey + Copyright 07.09.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixtrinverse( - complex_2d_array& a, - bool isupper, - ae_int_t& info, - matinvreport& rep); -void alglib::cmatrixtrinverse( - complex_2d_array& a, - ae_int_t n, - bool isupper, - bool isunit, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_cmatrixtrinverse( - complex_2d_array& a, - bool isupper, +
    void alglib::lsfitlinearwc( + real_1d_array y, + real_1d_array w, + real_2d_array fmatrix, + real_2d_array cmatrix, ae_int_t& info, - matinvreport& rep); -void alglib::smp_cmatrixtrinverse( - complex_2d_array& a, + real_1d_array& c, + lsfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::lsfitlinearwc( + real_1d_array y, + real_1d_array w, + real_2d_array fmatrix, + real_2d_array cmatrix, ae_int_t n, - bool isupper, - bool isunit, + ae_int_t m, + ae_int_t k, ae_int_t& info, - matinvreport& rep); + real_1d_array& c, + lsfitreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
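    The weighted constrained call simply combines the two previous sketches (weights w, basis fmatrix and constraint cmatrix as above).

    lsfitlinearwc(y, w, fmatrix, cmatrix, info, c, rep);
    if( info==1 )
    {
        /* c holds the weighted solution satisfying cmatrix exactly;
           rep describes the (non-weighted) fit errors                */
    }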

    +
     
    /************************************************************************* -Inversion of a Hermitian positive definite matrix which is given -by Cholesky decomposition. +Nonlinear least squares fitting results. -COMMERCIAL EDITION OF ALGLIB: +Called after return from LSFitFit(). - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. However, Cholesky inversion is a "difficult" - ! algorithm - it has lots of internal synchronization points which - ! prevents efficient parallelization of algorithm. Only very large - ! problems (N=thousands) can be efficiently parallelized. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +INPUT PARAMETERS: + State - algorithm state -Input parameters: - A - Cholesky decomposition of the matrix to be inverted: - A=U’*U or A = L*L'. - Output of HPDMatrixCholesky subroutine. - N - size of matrix A (optional) : - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, size is automatically determined from - matrix size (A must be square matrix) - IsUpper - storage type (optional): - * if True, symmetric matrix A is given by its upper - triangle, and the lower triangle isn’t used/changed by - function - * if False, symmetric matrix A is given by its lower - triangle, and the upper triangle isn’t used/changed by - function - * if not given, lower half is used. +OUTPUT PARAMETERS: + Info - completion code: + * -8 optimizer detected NAN/INF in the target + function and/or gradient + * -7 gradient verification failed. + See LSFitSetGradientCheck() for more information. + * -3 inconsistent constraints + * 2 relative step is no more than EpsX. + * 5 MaxIts steps was taken + * 7 stopping conditions are too stringent, + further improvement is impossible + C - array[0..K-1], solution + Rep - optimization report. On success following fields are set: + * R2 non-adjusted coefficient of determination + (non-weighted) + * RMSError rms error on the (X,Y). + * AvgError average error on the (X,Y). + * AvgRelError average relative error on the non-zero Y + * MaxError maximum error + NON-WEIGHTED ERRORS ARE CALCULATED + * WRMSError weighted rms error on the (X,Y). 
-Output parameters: - Info - return code, same as in RMatrixLUInverse - Rep - solver report, same as in RMatrixLUInverse - A - inverse of matrix A, same as in RMatrixLUInverse +ERRORS IN PARAMETERS - -- ALGLIB routine -- - 10.02.2010 - Bochkanov Sergey +This solver also calculates different kinds of errors in parameters and +fills corresponding fields of report: +* Rep.CovPar covariance matrix for parameters, array[K,K]. +* Rep.ErrPar errors in parameters, array[K], + errpar = sqrt(diag(CovPar)) +* Rep.ErrCurve vector of fit errors - standard deviations of empirical + best-fit curve from "ideal" best-fit curve built with + infinite number of samples, array[N]. + errcurve = sqrt(diag(J*CovPar*J')), + where J is Jacobian matrix. +* Rep.Noise vector of per-point estimates of noise, array[N] + +IMPORTANT: errors in parameters are calculated without taking into + account boundary/linear constraints! Presence of constraints + changes distribution of errors, but there is no easy way to + account for constraints when you calculate covariance matrix. + +NOTE: noise in the data is estimated as follows: + * for fitting without user-supplied weights all points are + assumed to have same level of noise, which is estimated from + the data + * for fitting with user-supplied weights we assume that noise + level in I-th point is inversely proportional to Ith weight. + Coefficient of proportionality is estimated from the data. + +NOTE: we apply small amount of regularization when we invert squared + Jacobian and calculate covariance matrix. It guarantees that + algorithm won't divide by zero during inversion, but skews + error estimates a bit (fractional error is about 10^-9). + + However, we believe that this difference is insignificant for + all practical purposes except for the situation when you want + to compare ALGLIB results with "reference" implementation up + to the last significant digit. + +NOTE: covariance matrix is estimated using correction for degrees + of freedom (covariances are divided by N-M instead of dividing + by N). + + -- ALGLIB -- + Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hpdmatrixcholeskyinverse( - complex_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::hpdmatrixcholeskyinverse( - complex_2d_array& a, - ae_int_t n, - bool isupper, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_hpdmatrixcholeskyinverse( - complex_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_hpdmatrixcholeskyinverse( - complex_2d_array& a, - ae_int_t n, - bool isupper, +
    void alglib::lsfitresults( + lsfitstate state, ae_int_t& info, - matinvreport& rep); + real_1d_array& c, + lsfitreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  [3]  [4]  [5]  
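    A sketch of reading back the solution and a few report fields after lsfitfit() returns; the lowercase field names follow the C++ interface, and the selection shown is an assumption about what a caller typically inspects.

    ae_int_t info;
    real_1d_array c;
    lsfitreport rep;

    lsfitresults(state, info, c, rep);
    if( info>0 )
    {
        double rms = rep.rmserror;   /* rms error on (X,Y)                        */
        double r2  = rep.r2;         /* non-adjusted coefficient of determination */
        /* c[0..K-1] holds the fitted parameters */
    }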

    +
     
    /************************************************************************* -Inversion of a Hermitian positive definite matrix. +This function sets boundary constraints for underlying optimizer -Given an upper or lower triangle of a Hermitian positive definite matrix, -the algorithm generates matrix A^-1 and saves the upper or lower triangle -depending on the input. +Boundary constraints are inactive by default (after initial creation). +They are preserved until explicitly turned off with another SetBC() call. -COMMERCIAL EDITION OF ALGLIB: +INPUT PARAMETERS: + State - structure stores algorithm state + BndL - lower bounds, array[K]. + If some (all) variables are unbounded, you may specify + very small number or -INF (latter is recommended because + it will allow solver to use better algorithm). + BndU - upper bounds, array[K]. + If some (all) variables are unbounded, you may specify + very large number or +INF (latter is recommended because + it will allow solver to use better algorithm). - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. However, Cholesky inversion is a "difficult" - ! algorithm - it has lots of internal synchronization points which - ! prevents efficient parallelization of algorithm. Only very large - ! problems (N=thousands) can be efficiently parallelized. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. - -Input parameters: - A - matrix to be inverted (upper or lower triangle). - Array with elements [0..N-1,0..N-1]. - N - size of matrix A (optional) : - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, size is automatically determined from - matrix size (A must be square matrix) - IsUpper - storage type (optional): - * if True, symmetric matrix A is given by its upper - triangle, and the lower triangle isn’t used/changed by - function - * if False, symmetric matrix A is given by its lower - triangle, and the upper triangle isn’t used/changed by - function - * if not given, both lower and upper triangles must be - filled. +NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th +variable will be "frozen" at X[i]=BndL[i]=BndU[i]. 
-Output parameters: - Info - return code, same as in RMatrixLUInverse - Rep - solver report, same as in RMatrixLUInverse - A - inverse of matrix A, same as in RMatrixLUInverse +NOTE 2: unlike other constrained optimization algorithms, this solver has +following useful properties: +* bound constraints are always satisfied exactly +* function is evaluated only INSIDE area specified by bound constraints - -- ALGLIB routine -- - 10.02.2010 - Bochkanov Sergey + -- ALGLIB -- + Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hpdmatrixinverse( - complex_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::hpdmatrixinverse( - complex_2d_array& a, - ae_int_t n, - bool isupper, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_hpdmatrixinverse( - complex_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_hpdmatrixinverse( - complex_2d_array& a, - ae_int_t n, - bool isupper, - ae_int_t& info, - matinvreport& rep); +
    void alglib::lsfitsetbc( + lsfitstate state, + real_1d_array bndl, + real_1d_array bndu, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  
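    A sketch of bounding a single parameter to [0,10] before calling lsfitfit(); the bound values are illustrative, and unbounded variables would use -INF/+INF as recommended above.

    real_1d_array bndl = "[0.0]";    /* lower bound for c0 (assumed value) */
    real_1d_array bndu = "[10.0]";   /* upper bound for c0 (assumed value) */
    lsfitsetbc(state, bndl, bndu);   /* call after lsfitcreate*(), before lsfitfit() */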

    - +
     
    /************************************************************************* -Inversion of a general matrix. - -COMMERCIAL EDITION OF ALGLIB: +Stopping conditions for nonlinear least squares fitting. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that matrix inversion is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +INPUT PARAMETERS: + State - structure which stores algorithm state + EpsX - >=0 + The subroutine finishes its work if on k+1-th iteration + the condition |v|<=EpsX is fulfilled, where: + * |.| means Euclidian norm + * v - scaled step vector, v[i]=dx[i]/s[i] + * dx - ste pvector, dx=X(k+1)-X(k) + * s - scaling coefficients set by LSFitSetScale() + MaxIts - maximum number of iterations. If MaxIts=0, the number of + iterations is unlimited. Only Levenberg-Marquardt + iterations are counted (L-BFGS/CG iterations are NOT + counted because their cost is very low compared to that of + LM). -Input parameters: - A - matrix. - N - size of matrix A (optional) : - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, size is automatically determined from - matrix size (A must be square matrix) +NOTE -Output parameters: - Info - return code, same as in RMatrixLUInverse - Rep - solver report, same as in RMatrixLUInverse - A - inverse of matrix A, same as in RMatrixLUInverse +Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic +stopping criterion selection (according to the scheme used by MINLM unit). -Result: - True, if the matrix is not singular. - False, if the matrix is singular. -- ALGLIB -- - Copyright 2005-2010 by Bochkanov Sergey + Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixinverse( - real_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::rmatrixinverse( - real_2d_array& a, - ae_int_t n, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_rmatrixinverse( - real_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_rmatrixinverse( - real_2d_array& a, - ae_int_t n, - ae_int_t& info, - matinvreport& rep); +
    void alglib::lsfitsetcond( + lsfitstate state, + double epsx, + ae_int_t maxits, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  [2]  [3]  [4]  [5]  
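    A sketch of setting the stopping criteria described above; the values are typical illustrative choices, not recommendations from the manual.

    double   epsx   = 0.000001;   /* stop once the scaled step |v| drops below EpsX */
    ae_int_t maxits = 0;          /* 0 = no explicit limit on Levenberg-Marquardt iterations */
    lsfitsetcond(state, epsx, maxits);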

    +
     
    /************************************************************************* -Inversion of a matrix given by its LU decomposition. - -COMMERCIAL EDITION OF ALGLIB: +This subroutine turns on verification of the user-supplied analytic +gradient: +* user calls this subroutine before fitting begins +* LSFitFit() is called +* prior to actual fitting, for each point in data set X_i and each + component of parameters being fited C_j algorithm performs following + steps: + * two trial steps are made to C_j-TestStep*S[j] and C_j+TestStep*S[j], + where C_j is j-th parameter and S[j] is a scale of j-th parameter + * if needed, steps are bounded with respect to constraints on C[] + * F(X_i|C) is evaluated at these trial points + * we perform one more evaluation in the middle point of the interval + * we build cubic model using function values and derivatives at trial + points and we compare its prediction with actual value in the middle + point + * in case difference between prediction and actual value is higher than + some predetermined threshold, algorithm stops with completion code -7; + Rep.VarIdx is set to index of the parameter with incorrect derivative. +* after verification is over, algorithm proceeds to the actual optimization. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that matrix inversion is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +NOTE 1: verification needs N*K (points count * parameters count) gradient + evaluations. It is very costly and you should use it only for low + dimensional problems, when you want to be sure that you've + correctly calculated analytic derivatives. You should not use it + in the production code (unless you want to check derivatives + provided by some third party). -INPUT PARAMETERS: - A - LU decomposition of the matrix - (output of RMatrixLU subroutine). - Pivots - table of permutations - (the output of RMatrixLU subroutine). 
- N - size of matrix A (optional) : - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, size is automatically determined from - matrix size (A must be square matrix) +NOTE 2: you should carefully choose TestStep. Value which is too large + (so large that function behaviour is significantly non-cubic) will + lead to false alarms. You may use different step for different + parameters by means of setting scale with LSFitSetScale(). -OUTPUT PARAMETERS: - Info - return code: - * -3 A is singular, or VERY close to singular. - it is filled by zeros in such cases. - * 1 task is solved (but matrix A may be ill-conditioned, - check R1/RInf parameters for condition numbers). - Rep - solver report, see below for more info - A - inverse of matrix A. - Array whose indexes range within [0..N-1, 0..N-1]. +NOTE 3: this function may lead to false positives. In case it reports that + I-th derivative was calculated incorrectly, you may decrease test + step and try one more time - maybe your function changes too + sharply and your step is too large for such rapidly chanding + function. -SOLVER REPORT +NOTE 4: this function works only for optimizers created with LSFitCreateWFG() + or LSFitCreateFG() constructors. -Subroutine sets following fields of the Rep structure: -* R1 reciprocal of condition number: 1/cond(A), 1-norm. -* RInf reciprocal of condition number: 1/cond(A), inf-norm. +INPUT PARAMETERS: + State - structure used to store algorithm state + TestStep - verification step: + * TestStep=0 turns verification off + * TestStep>0 activates verification - -- ALGLIB routine -- - 05.02.2010 - Bochkanov Sergey + -- ALGLIB -- + Copyright 15.06.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixluinverse( - real_2d_array& a, - integer_1d_array pivots, - ae_int_t& info, - matinvreport& rep); -void alglib::rmatrixluinverse( - real_2d_array& a, - integer_1d_array pivots, - ae_int_t n, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_rmatrixluinverse( - real_2d_array& a, - integer_1d_array pivots, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_rmatrixluinverse( - real_2d_array& a, - integer_1d_array pivots, - ae_int_t n, - ae_int_t& info, - matinvreport& rep); +
    void alglib::lsfitsetgradientcheck( + lsfitstate state, + double teststep, + const xparams _params = alglib::xdefault);
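A minimal sketch of a typical gradient-check workflow is shown below. The data values, the model f(c,x) = exp(-c0*x^2) and the constructor/driver calls (lsfitcreatefg(), lsfitfit(), lsfitresults()) follow the usual ALGLIB C++ nonlinear fitting pattern and are illustrative assumptions, not part of the reference text above.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

// model f(c,x) = exp(-c0*x0^2) and its analytic gradient w.r.t. c (illustrative)
void gc_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr)
{
    func = exp(-c[0]*x[0]*x[0]);
}
void gc_grad(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    func = exp(-c[0]*x[0]*x[0]);
    grad[0] = -x[0]*x[0]*func;   // df/dc0
}

int main(int argc, char **argv)
{
    real_2d_array x = "[[-1],[-0.5],[0],[0.5],[1]]";
    real_1d_array y = "[0.22, 0.69, 1.00, 0.69, 0.22]";
    real_1d_array c = "[0.3]";
    ae_int_t info;
    lsfitstate state;
    lsfitreport rep;

    // FG constructor: analytic gradient is supplied by the user
    lsfitcreatefg(x, y, c, true, state);

    // turn on derivative verification BEFORE LSFitFit(); a wrong gradient
    // makes fitting stop with completion code -7 (see description above)
    lsfitsetgradientcheck(state, 1.0e-4);

    lsfitfit(state, gc_func, gc_grad);
    lsfitresults(state, info, c, rep);
    printf("%d\n", int(info));
    printf("%s\n", c.tostring(2).c_str());
    return 0;
}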
    - +
     
    /************************************************************************* -Triangular matrix inverse (real) - -The subroutine inverts the following types of matrices: - * upper triangular - * upper triangular with unit diagonal - * lower triangular - * lower triangular with unit diagonal - -In case of an upper (lower) triangular matrix, the inverse matrix will -also be upper (lower) triangular, and after the end of the algorithm, the -inverse matrix replaces the source matrix. The elements below (above) the -main diagonal are not changed by the algorithm. - -If the matrix has a unit diagonal, the inverse matrix also has a unit -diagonal, and the diagonal elements are not passed to the algorithm. - -COMMERCIAL EDITION OF ALGLIB: +This function sets linear constraints for underlying optimizer - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that triangular inverse is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +Linear constraints are inactive by default (after initial creation). +They are preserved until explicitly turned off with another SetLC() call. -Input parameters: - A - matrix, array[0..N-1, 0..N-1]. - N - size of matrix A (optional) : - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, size is automatically determined from - matrix size (A must be square matrix) - IsUpper - True, if the matrix is upper triangular. - IsUnit - diagonal type (optional): - * if True, matrix has unit diagonal (a[i,i] are NOT used) - * if False, matrix diagonal is arbitrary - * if not given, False is assumed +INPUT PARAMETERS: + State - structure stores algorithm state + C - linear constraints, array[K,N+1]. + Each row of C represents one constraint, either equality + or inequality (see below): + * first N elements correspond to coefficients, + * last element corresponds to the right part. + All elements of C (including right part) must be finite. 
+ CT - type of constraints, array[K]: + * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] + * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] + * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] + K - number of equality/inequality constraints, K>=0: + * if given, only leading K elements of C/CT are used + * if not given, automatically determined from sizes of C/CT -Output parameters: - Info - same as for RMatrixLUInverse - Rep - same as for RMatrixLUInverse - A - same as for RMatrixLUInverse. +IMPORTANT: if you have linear constraints, it is strongly recommended to + set scale of variables with lsfitsetscale(). QP solver which is + used to calculate linearly constrained steps heavily relies on + good scaling of input problems. + +NOTE: linear (non-box) constraints are satisfied only approximately - + there always exists some violation due to numerical errors and + algorithmic limitations. + +NOTE: general linear constraints add significant overhead to solution + process. Although solver performs roughly same amount of iterations + (when compared with similar box-only constrained problem), each + iteration now involves solution of linearly constrained QP + subproblem, which requires ~3-5 times more Cholesky decompositions. + Thus, if you can reformulate your problem in such way this it has + only box constraints, it may be beneficial to do so. -- ALGLIB -- - Copyright 05.02.2010 by Bochkanov Sergey + Copyright 29.04.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixtrinverse( - real_2d_array& a, - bool isupper, - ae_int_t& info, - matinvreport& rep); -void alglib::rmatrixtrinverse( - real_2d_array& a, - ae_int_t n, - bool isupper, - bool isunit, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_rmatrixtrinverse( - real_2d_array& a, - bool isupper, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_rmatrixtrinverse( - real_2d_array& a, - ae_int_t n, - bool isupper, - bool isunit, - ae_int_t& info, - matinvreport& rep); +
    void alglib::lsfitsetlc( + lsfitstate state, + real_2d_array c, + integer_1d_array ct, + const xparams _params = alglib::xdefault); +void alglib::lsfitsetlc( + lsfitstate state, + real_2d_array c, + integer_1d_array ct, + ae_int_t k, + const xparams _params = alglib::xdefault);
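The sketch below shows one way lsfitsetlc() might be used to impose a single equality constraint c0+c1=2 on a two-parameter model. The model, the data and the lsfitcreatef()/lsfitfit()/lsfitresults() calls are assumptions based on the standard ALGLIB C++ interface and are not part of the text above.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

// straight-line model f(c,x) = c0 + c1*x0 (illustrative)
void lc_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr)
{
    func = c[0] + c[1]*x[0];
}

int main(int argc, char **argv)
{
    real_2d_array x = "[[0],[1],[2],[3],[4]]";
    real_1d_array y = "[0.9, 2.1, 2.9, 4.2, 5.1]";
    real_1d_array c = "[0, 0]";
    ae_int_t info;
    lsfitstate state;
    lsfitreport rep;

    // numerical-differentiation constructor (assumed standard interface)
    lsfitcreatef(x, y, c, 1.0e-4, state);

    // one equality constraint on the parameters: 1*c0 + 1*c1 = 2
    // (row = [coefficients..., right part], CT[0]=0 means "=")
    real_2d_array con = "[[1, 1, 2]]";
    integer_1d_array ct = "[0]";
    lsfitsetlc(state, con, ct);

    lsfitfit(state, lc_func);
    lsfitresults(state, info, c, rep);
    printf("%d\n", int(info));
    printf("%s\n", c.tostring(2).c_str());
    return 0;
}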
    - +
     
    /************************************************************************* -Inversion of a symmetric positive definite matrix which is given -by Cholesky decomposition. +This function sets scaling coefficients for underlying optimizer. -COMMERCIAL EDITION OF ALGLIB: +ALGLIB optimizers use scaling matrices to test stopping conditions (step +size and gradient are scaled before comparison with tolerances). Scale of +the I-th variable is a translation invariant measure of: +a) "how large" the variable is +b) how large the step should be to make significant changes in the function - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. However, Cholesky inversion is a "difficult" - ! algorithm - it has lots of internal synchronization points which - ! prevents efficient parallelization of algorithm. Only very large - ! problems (N=thousands) can be efficiently parallelized. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +Generally, scale is NOT considered to be a form of preconditioner. But LM +optimizer is unique in that it uses scaling matrix both in the stopping +condition tests and as Marquardt damping factor. -Input parameters: - A - Cholesky decomposition of the matrix to be inverted: - A=U’*U or A = L*L'. - Output of SPDMatrixCholesky subroutine. - N - size of matrix A (optional) : - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, size is automatically determined from - matrix size (A must be square matrix) - IsUpper - storage type (optional): - * if True, symmetric matrix A is given by its upper - triangle, and the lower triangle isn’t used/changed by - function - * if False, symmetric matrix A is given by its lower - triangle, and the upper triangle isn’t used/changed by - function - * if not given, lower half is used. +Proper scaling is very important for the algorithm performance. It is less +important for the quality of results, but still has some influence (it is +easier to converge when variables are properly scaled, so premature +stopping is possible when very badly scalled variables are combined with +relaxed stopping conditions). 
-Output parameters: - Info - return code, same as in RMatrixLUInverse - Rep - solver report, same as in RMatrixLUInverse - A - inverse of matrix A, same as in RMatrixLUInverse +INPUT PARAMETERS: + State - structure stores algorithm state + S - array[N], non-zero scaling coefficients + S[i] may be negative, sign doesn't matter. - -- ALGLIB routine -- - 10.02.2010 - Bochkanov Sergey + -- ALGLIB -- + Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spdmatrixcholeskyinverse( - real_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::spdmatrixcholeskyinverse( - real_2d_array& a, - ae_int_t n, - bool isupper, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_spdmatrixcholeskyinverse( - real_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_spdmatrixcholeskyinverse( - real_2d_array& a, - ae_int_t n, - bool isupper, - ae_int_t& info, - matinvreport& rep); +
    void alglib::lsfitsetscale( + lsfitstate state, + real_1d_array s, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -Inversion of a symmetric positive definite matrix. - -Given an upper or lower triangle of a symmetric positive definite matrix, -the algorithm generates matrix A^-1 and saves the upper or lower triangle -depending on the input. - -COMMERCIAL EDITION OF ALGLIB: +This function sets maximum step length - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. However, Cholesky inversion is a "difficult" - ! algorithm - it has lots of internal synchronization points which - ! prevents efficient parallelization of algorithm. Only very large - ! problems (N=thousands) can be efficiently parallelized. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +INPUT PARAMETERS: + State - structure which stores algorithm state + StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't + want to limit step length. -Input parameters: - A - matrix to be inverted (upper or lower triangle). - Array with elements [0..N-1,0..N-1]. - N - size of matrix A (optional) : - * if given, only principal NxN submatrix is processed and - overwritten. other elements are unchanged. - * if not given, size is automatically determined from - matrix size (A must be square matrix) - IsUpper - storage type (optional): - * if True, symmetric matrix A is given by its upper - triangle, and the lower triangle isn’t used/changed by - function - * if False, symmetric matrix A is given by its lower - triangle, and the upper triangle isn’t used/changed by - function - * if not given, both lower and upper triangles must be - filled. +Use this subroutine when you optimize target function which contains exp() +or other fast growing functions, and optimization algorithm makes too +large steps which leads to overflow. This function allows us to reject +steps that are too large (and therefore expose us to the possible +overflow) without actually calculating function value at the x+stp*d. -Output parameters: - Info - return code, same as in RMatrixLUInverse - Rep - solver report, same as in RMatrixLUInverse - A - inverse of matrix A, same as in RMatrixLUInverse +NOTE: non-zero StpMax leads to moderate performance degradation because +intermediate step of preconditioned L-BFGS optimization is incompatible +with limits on step size. 
- -- ALGLIB routine -- - 10.02.2010 - Bochkanov Sergey + -- ALGLIB -- + Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spdmatrixinverse( - real_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::spdmatrixinverse( - real_2d_array& a, - ae_int_t n, - bool isupper, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_spdmatrixinverse( - real_2d_array& a, - ae_int_t& info, - matinvreport& rep); -void alglib::smp_spdmatrixinverse( - real_2d_array& a, - ae_int_t n, - bool isupper, - ae_int_t& info, - matinvreport& rep); +
    void alglib::lsfitsetstpmax( + lsfitstate state, + double stpmax, + const xparams _params = alglib::xdefault);
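The following sketch demonstrates lsfitsetscale() together with lsfitsetstpmax() on a model whose parameters differ by many orders of magnitude. The model, the data and the lsfitcreatef()/lsfitfit() calls are illustrative assumptions, not part of the text above.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

// model with badly scaled parameters: c0 ~ 1.0E+6, c1 ~ 1.0E-3 (illustrative)
void sc_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr)
{
    func = c[0]*exp(-c[1]*x[0]);
}

int main(int argc, char **argv)
{
    real_2d_array x = "[[0],[100],[200],[400],[800]]";
    real_1d_array y = "[1000000, 905000, 819000, 670000, 449000]";
    real_1d_array c = "[500000, 0.01]";
    ae_int_t info;
    lsfitstate state;
    lsfitreport rep;

    lsfitcreatef(x, y, c, 1.0e-4, state);   // assumed constructor

    // tell the optimizer how "large" each parameter is; stopping conditions
    // and Marquardt damping are evaluated in scaled variables
    real_1d_array s = "[1.0e+6, 1.0e-3]";
    lsfitsetscale(state, s);

    // limit the length of trial steps (0 would mean "no limit")
    lsfitsetstpmax(state, 100.0);

    lsfitfit(state, sc_func);
    lsfitresults(state, info, c, rep);
    printf("%d\n", int(info));
    return 0;
}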
    -

    Examples:   [1]  

    - +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    +
    /************************************************************************* +This function turns on/off reporting. -using namespace alglib; +INPUT PARAMETERS: + State - structure which stores algorithm state + NeedXRep- whether iteration reports are needed or not +When reports are needed, State.C (current parameters) and State.F (current +value of fitting function) are reported. -int main(int argc, char **argv) -{ - complex_2d_array a = "[[1i,-1],[1i,1]]"; - ae_int_t info; - matinvreport rep; - cmatrixinverse(a, info, rep); - printf("%d\n", int(info)); // EXPECTED: 1 - printf("%s\n", a.tostring(4).c_str()); // EXPECTED: [[-0.5i,-0.5i],[-0.5,0.5]] - printf("%.4f\n", double(rep.r1)); // EXPECTED: 0.5 - printf("%.4f\n", double(rep.rinf)); // EXPECTED: 0.5 - return 0; -} + -- ALGLIB -- + Copyright 15.08.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::lsfitsetxrep( + lsfitstate state, + bool needxrep, + const xparams _params = alglib::xdefault); -
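A hedged sketch of lsfitsetxrep() usage follows. The report-callback signature (current parameters plus current value of the fitting functional, matching State.C/State.F above) is assumed to follow the usual ALGLIB callback convention; it is not specified in the text above, and the model and data are invented.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

// simple model (illustrative)
void xr_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr)
{
    func = c[0] + c[1]*x[0];
}

// progress callback; assumed to receive current parameters and current value
// of the fitting functional
void xr_report(const real_1d_array &c, double func, void *ptr)
{
    printf("c0=%.3f  c1=%.3f  f=%.6f\n", double(c[0]), double(c[1]), func);
}

int main(int argc, char **argv)
{
    real_2d_array x = "[[0],[1],[2],[3]]";
    real_1d_array y = "[1.1, 2.0, 2.9, 4.1]";
    real_1d_array c = "[0, 0]";
    ae_int_t info;
    lsfitstate state;
    lsfitreport rep;

    lsfitcreatef(x, y, c, 1.0e-4, state);   // assumed constructor
    lsfitsetxrep(state, true);              // request per-iteration reports
    lsfitfit(state, xr_func, xr_report);    // report callback passed to the driver
    lsfitresults(state, info, c, rep);
    return 0;
}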
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    -
    -using namespace alglib;
    -
    +
    /************************************************************************* +This subroutine fits piecewise linear curve to points with Ramer-Douglas- +Peucker algorithm, which stops after achieving desired precision. -int main(int argc, char **argv) -{ - complex_2d_array a = "[[2,1],[1,2]]"; - ae_int_t info; - matinvreport rep; - hpdmatrixinverse(a, info, rep); - printf("%d\n", int(info)); // EXPECTED: 1 - printf("%s\n", a.tostring(4).c_str()); // EXPECTED: [[0.666666,-0.333333],[-0.333333,0.666666]] - return 0; -} - - -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    real_2d_array a = "[[1,-1],[1,1]]";
    -    ae_int_t info;
    -    matinvreport rep;
    -    rmatrixinverse(a, info, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 1
    -    printf("%s\n", a.tostring(4).c_str()); // EXPECTED: [[0.5,0.5],[-0.5,0.5]]
    -    printf("%.4f\n", double(rep.r1)); // EXPECTED: 0.5
    -    printf("%.4f\n", double(rep.rinf)); // EXPECTED: 0.5
    -    return 0;
    -}
    -
    -
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    -
    -using namespace alglib;
    +IMPORTANT:
+* it performs non-least-squares fitting; it builds a curve, but this curve
+  does not minimize any least-squares metric. See the description of the RDP
    +  algorithm (say, in Wikipedia) for more details on WHAT is performed.
    +* this function does NOT work with parametric curves  (i.e.  curves  which
+  can be represented as {X(t),Y(t)}). It works with curves which can be
    +  represented as Y(X). Thus, it is impossible to model figures like circles
+  with this function.
    +  If  you  want  to  work  with  parametric   curves,   you   should   use
    +  ParametricRDPFixed() function provided  by  "Parametric"  subpackage  of
    +  "Interpolation" package.
     
    +INPUT PARAMETERS:
    +    X       -   array of X-coordinates:
    +                * at least N elements
    +                * can be unordered (points are automatically sorted)
    +                * this function may accept non-distinct X (see below for
    +                  more information on handling of such inputs)
    +    Y       -   array of Y-coordinates:
    +                * at least N elements
    +    N       -   number of elements in X/Y
    +    Eps     -   positive number, desired precision.
     
    -int main(int argc, char **argv)
    -{
    -    real_2d_array a = "[[2,1],[1,2]]";
    -    ae_int_t info;
    -    matinvreport rep;
    -    spdmatrixinverse(a, info, rep);
    -    printf("%d\n", int(info)); // EXPECTED: 1
    -    printf("%s\n", a.tostring(4).c_str()); // EXPECTED: [[0.666666,-0.333333],[-0.333333,0.666666]]
    -    return 0;
    -}
     
    +OUTPUT PARAMETERS:
    +    X2      -   X-values of corner points for piecewise approximation,
    +                has length NSections+1 or zero (for NSections=0).
    +    Y2      -   Y-values of corner points,
    +                has length NSections+1 or zero (for NSections=0).
    +    NSections-  number of sections found by algorithm,
    +                NSections can be zero for degenerate datasets
    +                (N<=1 or all X[] are non-distinct).
     
    -
    -
    - -mcpdreport
    -mcpdstate
    - -mcpdaddbc
    -mcpdaddec
    -mcpdaddtrack
    -mcpdcreate
    -mcpdcreateentry
    -mcpdcreateentryexit
    -mcpdcreateexit
    -mcpdresults
    -mcpdsetbc
    -mcpdsetec
    -mcpdsetlc
    -mcpdsetpredictionweights
    -mcpdsetprior
    -mcpdsettikhonovregularizer
    -mcpdsolve
    - - - - -
    mcpd_simple1 Simple unconstrained MCPD model (no entry/exit states)
    mcpd_simple2 Simple MCPD model (no entry/exit states) with equality constraints
    - -
    -
    /************************************************************************* -This structure is a MCPD training report: - InnerIterationsCount - number of inner iterations of the - underlying optimization algorithm - OuterIterationsCount - number of outer iterations of the - underlying optimization algorithm - NFEV - number of merit function evaluations - TerminationType - termination type - (same as for MinBLEIC optimizer, positive - values denote success, negative ones - - failure) +NOTE: X2/Y2 are ordered arrays, i.e. (X2[0],Y2[0]) is a first point of + curve, (X2[NSection-1],Y2[NSection-1]) is the last point. -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey + Copyright 02.10.2014 by Bochkanov Sergey *************************************************************************/ -
    class mcpdreport -{ - ae_int_t inneriterationscount; - ae_int_t outeriterationscount; - ae_int_t nfev; - ae_int_t terminationtype; -}; +
    void alglib::lstfitpiecewiselinearrdp( + real_1d_array x, + real_1d_array y, + ae_int_t n, + double eps, + real_1d_array& x2, + real_1d_array& y2, + ae_int_t& nsections, + const xparams _params = alglib::xdefault);
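A small, self-contained usage sketch with invented data follows; only the call shown in the prototype above is taken from the manual.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // noisy, roughly piecewise linear data (invented); Eps controls how
    // closely the RDP curve must follow the points
    real_1d_array x = "[0, 1, 2, 3, 4, 5, 6, 7, 8]";
    real_1d_array y = "[0.0, 1.1, 1.9, 3.1, 3.0, 2.9, 3.1, 3.0, 2.9]";
    real_1d_array x2, y2;
    ae_int_t nsections;

    lstfitpiecewiselinearrdp(x, y, 9, 0.5, x2, y2, nsections);
    printf("%d\n", int(nsections));            // number of linear sections found
    printf("%s\n", x2.tostring(2).c_str());    // X-coordinates of corner points
    printf("%s\n", y2.tostring(2).c_str());    // Y-coordinates of corner points
    return 0;
}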
    - +
     
    /************************************************************************* -This structure is a MCPD (Markov Chains for Population Data) solver. +This subroutine fits piecewise linear curve to points with Ramer-Douglas- +Peucker algorithm, which stops after generating specified number of linear +sections. -You should use ALGLIB functions in order to work with this object. +IMPORTANT: +* it does NOT perform least-squares fitting; it builds curve, but this + curve does not minimize some least squares metric. See description of + RDP algorithm (say, in Wikipedia) for more details on WHAT is performed. +* this function does NOT work with parametric curves (i.e. curves which + can be represented as {X(t),Y(t)}. It works with curves which can be + represented as Y(X). Thus, it is impossible to model figures like + circles with this functions. + If you want to work with parametric curves, you should use + ParametricRDPFixed() function provided by "Parametric" subpackage of + "Interpolation" package. + +INPUT PARAMETERS: + X - array of X-coordinates: + * at least N elements + * can be unordered (points are automatically sorted) + * this function may accept non-distinct X (see below for + more information on handling of such inputs) + Y - array of Y-coordinates: + * at least N elements + N - number of elements in X/Y + M - desired number of sections: + * at most M sections are generated by this function + * less than M sections can be generated if we have N<M + (or some X are non-distinct). + +OUTPUT PARAMETERS: + X2 - X-values of corner points for piecewise approximation, + has length NSections+1 or zero (for NSections=0). + Y2 - Y-values of corner points, + has length NSections+1 or zero (for NSections=0). + NSections- number of sections found by algorithm, NSections<=M, + NSections can be zero for degenerate datasets + (N<=1 or all X[] are non-distinct). + +NOTE: X2/Y2 are ordered arrays, i.e. (X2[0],Y2[0]) is a first point of + curve, (X2[NSection-1],Y2[NSection-1]) is the last point. -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey + Copyright 02.10.2014 by Bochkanov Sergey *************************************************************************/ -
    class mcpdstate -{ -}; +
    void alglib::lstfitpiecewiselinearrdpfixed( + real_1d_array x, + real_1d_array y, + ae_int_t n, + ae_int_t m, + real_1d_array& x2, + real_1d_array& y2, + ae_int_t& nsections, + const xparams _params = alglib::xdefault);
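As above, a short illustrative sketch with invented data, this time limiting the number of generated sections:

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // same kind of invented data, but the budget is "at most M=2 sections"
    real_1d_array x = "[0, 1, 2, 3, 4, 5, 6, 7, 8]";
    real_1d_array y = "[0.0, 1.1, 1.9, 3.1, 3.0, 2.9, 3.1, 3.0, 2.9]";
    real_1d_array x2, y2;
    ae_int_t nsections;

    lstfitpiecewiselinearrdpfixed(x, y, 9, 2, x2, y2, nsections);
    printf("%d\n", int(nsections));            // NSections <= M
    printf("%s\n", x2.tostring(2).c_str());
    printf("%s\n", y2.tostring(2).c_str());
    return 0;
}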
    - +
     
    /************************************************************************* -This function is used to add bound constraints on the elements of the -transition matrix P. - -MCPD solver has four types of constraints which can be placed on P: -* user-specified equality constraints (optional) -* user-specified bound constraints (optional) -* user-specified general linear constraints (optional) -* basic constraints (always present): - * non-negativity: P[i,j]>=0 - * consistency: every column of P sums to 1.0 +Fitting by polynomials in barycentric form. This function provides simple +unterface for unconstrained unweighted fitting. See PolynomialFitWC() if +you need constrained fitting. -Final constraints which are passed to the underlying optimizer are -calculated as intersection of all present constraints. For example, you -may specify boundary constraint on P[0,0] and equality one: - 0.1<=P[0,0]<=0.9 - P[0,0]=0.5 -Such combination of constraints will be silently reduced to their -intersection, which is P[0,0]=0.5. +Task is linear, so linear least squares solver is used. Complexity of this +computational scheme is O(N*M^2), mostly dominated by least squares solver -This function can be used to ADD bound constraint for one element of P -without changing constraints for other elements. +SEE ALSO: + PolynomialFitWC() -You can also use MCPDSetBC() function which allows to place bound -constraints on arbitrary subset of elements of P. Set of constraints is -specified by BndL/BndU matrices, which may contain arbitrary combination -of finite numbers or infinities (like -INF<x<=0.5 or 0.1<=x<+INF). +NOTES: + you can convert P from barycentric form to the power or Chebyshev + basis with PolynomialBar2Pow() or PolynomialBar2Cheb() functions from + POLINT subpackage. -These functions (MCPDSetBC and MCPDAddBC) interact as follows: -* there is internal matrix of bound constraints which is stored in the - MCPD solver -* MCPDSetBC() replaces this matrix by another one (SET) -* MCPDAddBC() modifies one element of this matrix and leaves other ones - unchanged (ADD) -* thus MCPDAddBC() call preserves all modifications done by previous - calls, while MCPDSetBC() completely discards all changes done to the - equality constraints. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - S - solver - I - row index of element being constrained - J - column index of element being constrained - BndL - lower bound - BndU - upper bound + X - points, array[0..N-1]. + Y - function values, array[0..N-1]. + N - number of points, N>0 + * if given, only leading N elements of X/Y are used + * if not given, automatically determined from sizes of X/Y + M - number of basis functions (= polynomial_degree + 1), M>=1 - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey +OUTPUT PARAMETERS: + Info- same format as in LSFitLinearW() subroutine: + * Info>0 task is solved + * Info<=0 an error occured: + -4 means inconvergence of internal SVD + P - interpolant in barycentric form. 
+ Rep - report, same format as in LSFitLinearW() subroutine. + Following fields are set: + * RMSError rms error on the (X,Y). + * AvgError average error on the (X,Y). + * AvgRelError average relative error on the non-zero Y + * MaxError maximum error + NON-WEIGHTED ERRORS ARE CALCULATED + + -- ALGLIB PROJECT -- + Copyright 10.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mcpdaddbc( - mcpdstate s, - ae_int_t i, - ae_int_t j, - double bndl, - double bndu); +
    void alglib::polynomialfit( + real_1d_array x, + real_1d_array y, + ae_int_t m, + ae_int_t& info, + barycentricinterpolant& p, + polynomialfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::polynomialfit( + real_1d_array x, + real_1d_array y, + ae_int_t n, + ae_int_t m, + ae_int_t& info, + barycentricinterpolant& p, + polynomialfitreport& rep, + const xparams _params = alglib::xdefault);
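A minimal usage sketch follows; the data are invented, and barycentriccalc() (used to evaluate the fitted interpolant) is assumed from the barycentric/POLINT part of the ALGLIB interface rather than taken from the text above.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // fit a quadratic (M = polynomial_degree + 1 = 3) to noisy samples of x^2
    real_1d_array x = "[-2, -1, 0, 1, 2, 3]";
    real_1d_array y = "[4.05, 0.98, 0.02, 1.03, 3.96, 9.01]";
    ae_int_t info;
    barycentricinterpolant p;
    polynomialfitreport rep;

    polynomialfit(x, y, 3, info, p, rep);
    printf("%d\n", int(info));                   // >0 on success
    printf("%.2f\n", barycentriccalc(p, 1.5));   // evaluate fitted polynomial at x=1.5
    printf("%.4f\n", double(rep.rmserror));      // fit quality from the report
    return 0;
}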
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function is used to add equality constraints on the elements of the -transition matrix P. +Weighted fitting by polynomials in barycentric form, with constraints on +function values or first derivatives. -MCPD solver has four types of constraints which can be placed on P: -* user-specified equality constraints (optional) -* user-specified bound constraints (optional) -* user-specified general linear constraints (optional) -* basic constraints (always present): - * non-negativity: P[i,j]>=0 - * consistency: every column of P sums to 1.0 +Small regularizing term is used when solving constrained tasks (to improve +stability). -Final constraints which are passed to the underlying optimizer are -calculated as intersection of all present constraints. For example, you -may specify boundary constraint on P[0,0] and equality one: - 0.1<=P[0,0]<=0.9 - P[0,0]=0.5 -Such combination of constraints will be silently reduced to their -intersection, which is P[0,0]=0.5. +Task is linear, so linear least squares solver is used. Complexity of this +computational scheme is O(N*M^2), mostly dominated by least squares solver -This function can be used to ADD equality constraint for one element of P -without changing constraints for other elements. +SEE ALSO: + PolynomialFit() -You can also use MCPDSetEC() function which allows you to specify -arbitrary set of equality constraints in one call. +NOTES: + you can convert P from barycentric form to the power or Chebyshev + basis with PolynomialBar2Pow() or PolynomialBar2Cheb() functions from + POLINT subpackage. -These functions (MCPDSetEC and MCPDAddEC) interact as follows: -* there is internal matrix of equality constraints which is stored in the - MCPD solver -* MCPDSetEC() replaces this matrix by another one (SET) -* MCPDAddEC() modifies one element of this matrix and leaves other ones - unchanged (ADD) -* thus MCPDAddEC() call preserves all modifications done by previous - calls, while MCPDSetEC() completely discards all changes done to the - equality constraints. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - S - solver - I - row index of element being constrained - J - column index of element being constrained - C - value (constraint for P[I,J]). Can be either NAN (no - constraint) or finite value from [0,1]. + X - points, array[0..N-1]. + Y - function values, array[0..N-1]. + W - weights, array[0..N-1] + Each summand in square sum of approximation deviations from + given values is multiplied by the square of corresponding + weight. Fill it by 1's if you don't want to solve weighted + task. + N - number of points, N>0. + * if given, only leading N elements of X/Y/W are used + * if not given, automatically determined from sizes of X/Y/W + XC - points where polynomial values/derivatives are constrained, + array[0..K-1]. 
+ YC - values of constraints, array[0..K-1] + DC - array[0..K-1], types of constraints: + * DC[i]=0 means that P(XC[i])=YC[i] + * DC[i]=1 means that P'(XC[i])=YC[i] + SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS + K - number of constraints, 0<=K<M. + K=0 means no constraints (XC/YC/DC are not used in such cases) + M - number of basis functions (= polynomial_degree + 1), M>=1 -NOTES: +OUTPUT PARAMETERS: + Info- same format as in LSFitLinearW() subroutine: + * Info>0 task is solved + * Info<=0 an error occured: + -4 means inconvergence of internal SVD + -3 means inconsistent constraints + P - interpolant in barycentric form. + Rep - report, same format as in LSFitLinearW() subroutine. + Following fields are set: + * RMSError rms error on the (X,Y). + * AvgError average error on the (X,Y). + * AvgRelError average relative error on the non-zero Y + * MaxError maximum error + NON-WEIGHTED ERRORS ARE CALCULATED -1. infinite values of C will lead to exception being thrown. Values less -than 0.0 or greater than 1.0 will lead to error code being returned after -call to MCPDSolve(). +IMPORTANT: + this subroitine doesn't calculate task's condition number for K<>0. - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey +SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: + +Setting constraints can lead to undesired results, like ill-conditioned +behavior, or inconsistency being detected. From the other side, it allows +us to improve quality of the fit. Here we summarize our experience with +constrained regression splines: +* even simple constraints can be inconsistent, see Wikipedia article on + this subject: http://en.wikipedia.org/wiki/Birkhoff_interpolation +* the greater is M (given fixed constraints), the more chances that + constraints will be consistent +* in the general case, consistency of constraints is NOT GUARANTEED. +* in the one special cases, however, we can guarantee consistency. This + case is: M>1 and constraints on the function values (NOT DERIVATIVES) + +Our final recommendation is to use constraints WHEN AND ONLY when you +can't solve your task without them. Anything beyond special cases given +above is not guaranteed and may result in inconsistency. + + -- ALGLIB PROJECT -- + Copyright 10.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mcpdaddec(mcpdstate s, ae_int_t i, ae_int_t j, double c); +
    void alglib::polynomialfitwc( + real_1d_array x, + real_1d_array y, + real_1d_array w, + real_1d_array xc, + real_1d_array yc, + integer_1d_array dc, + ae_int_t m, + ae_int_t& info, + barycentricinterpolant& p, + polynomialfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::polynomialfitwc( + real_1d_array x, + real_1d_array y, + real_1d_array w, + ae_int_t n, + real_1d_array xc, + real_1d_array yc, + integer_1d_array dc, + ae_int_t k, + ae_int_t m, + ae_int_t& info, + barycentricinterpolant& p, + polynomialfitreport& rep, + const xparams _params = alglib::xdefault);
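The sketch below fits a quadratic constrained to pass through the origin; the data are invented and barycentriccalc() is again assumed from the barycentric interface.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // weighted quadratic fit constrained to pass through the origin:
    // DC[0]=0 constrains the function value, DC[0]=1 would constrain P'
    real_1d_array x = "[-2, -1, 0, 1, 2]";
    real_1d_array y = "[3.9, 1.1, 0.1, 0.9, 4.1]";
    real_1d_array w = "[1, 1, 1, 1, 1]";
    real_1d_array xc = "[0]";
    real_1d_array yc = "[0]";
    integer_1d_array dc = "[0]";
    ae_int_t info;
    barycentricinterpolant p;
    polynomialfitreport rep;

    polynomialfitwc(x, y, w, xc, yc, dc, 3, info, p, rep);
    printf("%d\n", int(info));
    printf("%.6f\n", barycentriccalc(p, 0.0));   // ~0 due to the constraint
    return 0;
}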
    -

    Examples:   [1]  

    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function is used to add a track - sequence of system states at the -different moments of its evolution. - -You may add one or several tracks to the MCPD solver. In case you have -several tracks, they won't overwrite each other. For example, if you pass -two tracks, A1-A2-A3 (system at t=A+1, t=A+2 and t=A+3) and B1-B2-B3, then -solver will try to model transitions from t=A+1 to t=A+2, t=A+2 to t=A+3, -t=B+1 to t=B+2, t=B+2 to t=B+3. But it WONT mix these two tracks - i.e. it -wont try to model transition from t=A+3 to t=B+1. - -INPUT PARAMETERS: - S - solver - XY - track, array[K,N]: - * I-th row is a state at t=I - * elements of XY must be non-negative (exception will be - thrown on negative elements) - K - number of points in a track - * if given, only leading K rows of XY are used - * if not given, automatically determined from size of XY +Least squares fitting by cubic spline. -NOTES: +This subroutine is "lightweight" alternative for more complex and feature- +rich Spline1DFitCubicWC(). See Spline1DFitCubicWC() for more information +about subroutine parameters (we don't duplicate it here because of length) -1. Track may contain either proportional or population data: - * with proportional data all rows of XY must sum to 1.0, i.e. we have - proportions instead of absolute population values - * with population data rows of XY contain population counts and generally - do not sum to 1.0 (although they still must be non-negative) + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mcpdaddtrack(mcpdstate s, real_2d_array xy); -void alglib::mcpdaddtrack(mcpdstate s, real_2d_array xy, ae_int_t k); +
    void alglib::spline1dfitcubic( + real_1d_array x, + real_1d_array y, + ae_int_t m, + ae_int_t& info, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::spline1dfitcubic( + real_1d_array x, + real_1d_array y, + ae_int_t n, + ae_int_t m, + ae_int_t& info, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault);
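A minimal sketch with invented, roughly sinusoidal data; spline1dcalc() (spline evaluation) is assumed from the SPLINE1D interface and is not described in the text above.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // noisy samples of sin(x) on [0,4] (invented values)
    real_1d_array x = "[0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0]";
    real_1d_array y = "[0.02, 0.48, 0.86, 1.01, 0.93, 0.57, 0.13, -0.39, -0.74]";
    ae_int_t info;
    spline1dinterpolant s;
    spline1dfitreport rep;

    // M=6 basis functions, i.e. an equidistant grid with 4 nodes
    spline1dfitcubic(x, y, 6, info, s, rep);
    printf("%d\n", int(info));
    printf("%.3f\n", spline1dcalc(s, 2.2));   // evaluate the fitted spline
    printf("%.4f\n", double(rep.rmserror));
    return 0;
}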
    -

    Examples:   [1]  [2]  

    - +
     
    /************************************************************************* -DESCRIPTION: +Weighted fitting by cubic spline, with constraints on function values or +derivatives. -This function creates MCPD (Markov Chains for Population Data) solver. +Equidistant grid with M-2 nodes on [min(x,xc),max(x,xc)] is used to build +basis functions. Basis functions are cubic splines with continuous second +derivatives and non-fixed first derivatives at interval ends. Small +regularizing term is used when solving constrained tasks (to improve +stability). -This solver can be used to find transition matrix P for N-dimensional -prediction problem where transition from X[i] to X[i+1] is modelled as - X[i+1] = P*X[i] -where X[i] and X[i+1] are N-dimensional population vectors (components of -each X are non-negative), and P is a N*N transition matrix (elements of P -are non-negative, each column sums to 1.0). +Task is linear, so linear least squares solver is used. Complexity of this +computational scheme is O(N*M^2), mostly dominated by least squares solver -Such models arise when when: -* there is some population of individuals -* individuals can have different states -* individuals can transit from one state to another -* population size is constant, i.e. there is no new individuals and no one - leaves population -* you want to model transitions of individuals from one state into another +SEE ALSO + Spline1DFitHermiteWC() - fitting by Hermite splines (more flexible, + less smooth) + Spline1DFitCubic() - "lightweight" fitting by cubic splines, + without invididual weights and constraints -USAGE: + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -Here we give very brief outline of the MCPD. We strongly recommend you to -read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide -on data analysis which is available at http://www.alglib.net/dataanalysis/ +INPUT PARAMETERS: + X - points, array[0..N-1]. + Y - function values, array[0..N-1]. + W - weights, array[0..N-1] + Each summand in square sum of approximation deviations from + given values is multiplied by the square of corresponding + weight. Fill it by 1's if you don't want to solve weighted + task. + N - number of points (optional): + * N>0 + * if given, only first N elements of X/Y/W are processed + * if not given, automatically determined from X/Y/W sizes + XC - points where spline values/derivatives are constrained, + array[0..K-1]. + YC - values of constraints, array[0..K-1] + DC - array[0..K-1], types of constraints: + * DC[i]=0 means that S(XC[i])=YC[i] + * DC[i]=1 means that S'(XC[i])=YC[i] + SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS + K - number of constraints (optional): + * 0<=K<M. + * K=0 means no constraints (XC/YC/DC are not used) + * if given, only first K elements of XC/YC/DC are used + * if not given, automatically determined from XC/YC/DC + M - number of basis functions ( = number_of_nodes+2), M>=4. -1. 
User initializes algorithm state with MCPDCreate() call +OUTPUT PARAMETERS: + Info- same format as in LSFitLinearWC() subroutine. + * Info>0 task is solved + * Info<=0 an error occured: + -4 means inconvergence of internal SVD + -3 means inconsistent constraints + S - spline interpolant. + Rep - report, same format as in LSFitLinearWC() subroutine. + Following fields are set: + * RMSError rms error on the (X,Y). + * AvgError average error on the (X,Y). + * AvgRelError average relative error on the non-zero Y + * MaxError maximum error + NON-WEIGHTED ERRORS ARE CALCULATED -2. User adds one or more tracks - sequences of states which describe - evolution of a system being modelled from different starting conditions +IMPORTANT: + this subroitine doesn't calculate task's condition number for K<>0. -3. User may add optional boundary, equality and/or linear constraints on - the coefficients of P by calling one of the following functions: - * MCPDSetEC() to set equality constraints - * MCPDSetBC() to set bound constraints - * MCPDSetLC() to set linear constraints -4. Optionally, user may set custom weights for prediction errors (by - default, algorithm assigns non-equal, automatically chosen weights for - errors in the prediction of different components of X). It can be done - with a call of MCPDSetPredictionWeights() function. +ORDER OF POINTS -5. User calls MCPDSolve() function which takes algorithm state and - pointer (delegate, etc.) to callback function which calculates F/G. +Subroutine automatically sorts points, so caller may pass unsorted array. -6. User calls MCPDResults() to get solution +SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES: -INPUT PARAMETERS: - N - problem dimension, N>=1 +Setting constraints can lead to undesired results, like ill-conditioned +behavior, or inconsistency being detected. From the other side, it allows +us to improve quality of the fit. Here we summarize our experience with +constrained regression splines: +* excessive constraints can be inconsistent. Splines are piecewise cubic + functions, and it is easy to create an example, where large number of + constraints concentrated in small area will result in inconsistency. + Just because spline is not flexible enough to satisfy all of them. And + same constraints spread across the [min(x),max(x)] will be perfectly + consistent. +* the more evenly constraints are spread across [min(x),max(x)], the more + chances that they will be consistent +* the greater is M (given fixed constraints), the more chances that + constraints will be consistent +* in the general case, consistency of constraints IS NOT GUARANTEED. +* in the several special cases, however, we CAN guarantee consistency. +* one of this cases is constraints on the function values AND/OR its + derivatives at the interval boundaries. +* another special case is ONE constraint on the function value (OR, but + not AND, derivative) anywhere in the interval -OUTPUT PARAMETERS: - State - structure stores algorithm state +Our final recommendation is to use constraints WHEN AND ONLY WHEN you +can't solve your task without them. Anything beyond special cases given +above is not guaranteed and may result in inconsistency. - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey + + -- ALGLIB PROJECT -- + Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mcpdcreate(ae_int_t n, mcpdstate& s); +
    void alglib::spline1dfitcubicwc( + real_1d_array x, + real_1d_array y, + real_1d_array w, + real_1d_array xc, + real_1d_array yc, + integer_1d_array dc, + ae_int_t m, + ae_int_t& info, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::spline1dfitcubicwc( + real_1d_array x, + real_1d_array y, + real_1d_array w, + ae_int_t n, + real_1d_array xc, + real_1d_array yc, + integer_1d_array dc, + ae_int_t k, + ae_int_t m, + ae_int_t& info, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault);
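An illustrative sketch constraining the fitted spline to S(0)=0 at the left boundary (one of the special cases listed above where consistency is guaranteed); the data are invented and spline1dcalc() is assumed from the SPLINE1D interface.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // weighted cubic spline fit constrained to S(0)=0 at the left boundary
    // (DC[0]=0 constrains the value; DC[0]=1 would constrain S'(0))
    real_1d_array x = "[0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0]";
    real_1d_array y = "[0.05, 0.48, 0.86, 1.01, 0.93, 0.57, 0.13, -0.39, -0.74]";
    real_1d_array w = "[1, 1, 1, 1, 1, 1, 1, 1, 1]";
    real_1d_array xc = "[0.0]";
    real_1d_array yc = "[0.0]";
    integer_1d_array dc = "[0]";
    ae_int_t info;
    spline1dinterpolant s;
    spline1dfitreport rep;

    spline1dfitcubicwc(x, y, w, xc, yc, dc, 6, info, s, rep);
    printf("%d\n", int(info));
    printf("%.6f\n", spline1dcalc(s, 0.0));   // pinned (approximately) to 0
    return 0;
}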
    -

    Examples:   [1]  [2]  

    - +
     
    /************************************************************************* -DESCRIPTION: - -This function is a specialized version of MCPDCreate() function, and we -recommend you to read comments for this function for general information -about MCPD solver. - -This function creates MCPD (Markov Chains for Population Data) solver -for "Entry-state" model, i.e. model where transition from X[i] to X[i+1] -is modelled as - X[i+1] = P*X[i] -where - X[i] and X[i+1] are N-dimensional state vectors - P is a N*N transition matrix -and one selected component of X[] is called "entry" state and is treated -in a special way: - system state always transits from "entry" state to some another state - system state can not transit from any state into "entry" state -Such conditions basically mean that row of P which corresponds to "entry" -state is zero. - -Such models arise when: -* there is some population of individuals -* individuals can have different states -* individuals can transit from one state to another -* population size is NOT constant - at every moment of time there is some - (unpredictable) amount of "new" individuals, which can transit into one - of the states at the next turn, but still no one leaves population -* you want to model transitions of individuals from one state into another -* but you do NOT want to predict amount of "new" individuals because it - does not depends on individuals already present (hence system can not - transit INTO entry state - it can only transit FROM it). - -This model is discussed in more details in the ALGLIB User Guide (see -http://www.alglib.net/dataanalysis/ for more data). +Least squares fitting by Hermite spline. -INPUT PARAMETERS: - N - problem dimension, N>=2 - EntryState- index of entry state, in 0..N-1 +This subroutine is "lightweight" alternative for more complex and feature- +rich Spline1DFitHermiteWC(). See Spline1DFitHermiteWC() description for +more information about subroutine parameters (we don't duplicate it here +because of length). -OUTPUT PARAMETERS: - State - structure stores algorithm state + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 18.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mcpdcreateentry( +
    void alglib::spline1dfithermite( + real_1d_array x, + real_1d_array y, + ae_int_t m, + ae_int_t& info, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::spline1dfithermite( + real_1d_array x, + real_1d_array y, ae_int_t n, - ae_int_t entrystate, - mcpdstate& s); + ae_int_t m, + ae_int_t& info, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -DESCRIPTION: +Weighted fitting by Hermite spline, with constraints on function values +or first derivatives. -This function is a specialized version of MCPDCreate() function, and we -recommend you to read comments for this function for general information -about MCPD solver. +Equidistant grid with M nodes on [min(x,xc),max(x,xc)] is used to build +basis functions. Basis functions are Hermite splines. Small regularizing +term is used when solving constrained tasks (to improve stability). -This function creates MCPD (Markov Chains for Population Data) solver -for "Entry-Exit-states" model, i.e. model where transition from X[i] to -X[i+1] is modelled as - X[i+1] = P*X[i] -where - X[i] and X[i+1] are N-dimensional state vectors - P is a N*N transition matrix -one selected component of X[] is called "entry" state and is treated in a -special way: - system state always transits from "entry" state to some another state - system state can not transit from any state into "entry" state -and another one component of X[] is called "exit" state and is treated in -a special way too: - system state can transit from any state into "exit" state - system state can not transit from "exit" state into any other state - transition operator discards "exit" state (makes it zero at each turn) -Such conditions basically mean that: - row of P which corresponds to "entry" state is zero - column of P which corresponds to "exit" state is zero -Multiplication by such P may decrease sum of vector components. +Task is linear, so linear least squares solver is used. Complexity of this +computational scheme is O(N*M^2), mostly dominated by least squares solver -Such models arise when: -* there is some population of individuals -* individuals can have different states -* individuals can transit from one state to another -* population size is NOT constant -* at every moment of time there is some (unpredictable) amount of "new" - individuals, which can transit into one of the states at the next turn -* some individuals can move (predictably) into "exit" state and leave - population at the next turn -* you want to model transitions of individuals from one state into another, - including transitions from the "entry" state and into the "exit" state. -* but you do NOT want to predict amount of "new" individuals because it - does not depends on individuals already present (hence system can not - transit INTO entry state - it can only transit FROM it). +SEE ALSO + Spline1DFitCubicWC() - fitting by Cubic splines (less flexible, + more smooth) + Spline1DFitHermite() - "lightweight" Hermite fitting, without + invididual weights and constraints -This model is discussed in more details in the ALGLIB User Guide (see -http://www.alglib.net/dataanalysis/ for more data). + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. 
INPUT PARAMETERS: - N - problem dimension, N>=2 - EntryState- index of entry state, in 0..N-1 - ExitState- index of exit state, in 0..N-1 + X - points, array[0..N-1]. + Y - function values, array[0..N-1]. + W - weights, array[0..N-1] + Each summand in square sum of approximation deviations from + given values is multiplied by the square of corresponding + weight. Fill it by 1's if you don't want to solve weighted + task. + N - number of points (optional): + * N>0 + * if given, only first N elements of X/Y/W are processed + * if not given, automatically determined from X/Y/W sizes + XC - points where spline values/derivatives are constrained, + array[0..K-1]. + YC - values of constraints, array[0..K-1] + DC - array[0..K-1], types of constraints: + * DC[i]=0 means that S(XC[i])=YC[i] + * DC[i]=1 means that S'(XC[i])=YC[i] + SEE BELOW FOR IMPORTANT INFORMATION ON CONSTRAINTS + K - number of constraints (optional): + * 0<=K<M. + * K=0 means no constraints (XC/YC/DC are not used) + * if given, only first K elements of XC/YC/DC are used + * if not given, automatically determined from XC/YC/DC + M - number of basis functions (= 2 * number of nodes), + M>=4, + M IS EVEN! OUTPUT PARAMETERS: - State - structure stores algorithm state + Info- same format as in LSFitLinearW() subroutine: + * Info>0 task is solved + * Info<=0 an error occured: + -4 means inconvergence of internal SVD + -3 means inconsistent constraints + -2 means odd M was passed (which is not supported) + -1 means another errors in parameters passed + (N<=0, for example) + S - spline interpolant. + Rep - report, same format as in LSFitLinearW() subroutine. + Following fields are set: + * RMSError rms error on the (X,Y). + * AvgError average error on the (X,Y). + * AvgRelError average relative error on the non-zero Y + * MaxError maximum error + NON-WEIGHTED ERRORS ARE CALCULATED - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mcpdcreateentryexit( - ae_int_t n, - ae_int_t entrystate, - ae_int_t exitstate, - mcpdstate& s); +IMPORTANT: + this subroitine doesn't calculate task's condition number for K<>0. -
    - -
    -
    /*************************************************************************
-DESCRIPTION:
+IMPORTANT:
+    this subroutine supports only even M's
-This function is a specialized version of MCPDCreate() function, and we
-recommend you to read comments for this function for general information
-about MCPD solver.
-This function creates MCPD (Markov Chains for Population Data) solver
-for "Exit-state" model, i.e. model where transition from X[i] to X[i+1]
-is modelled as
-    X[i+1] = P*X[i]
-where
-    X[i] and X[i+1] are N-dimensional state vectors
-    P is a N*N transition matrix
-and one selected component of X[] is called "exit" state and is treated
-in a special way:
-    system state can transit from any state into "exit" state
-    system state can not transit from "exit" state into any other state
-    transition operator discards "exit" state (makes it zero at each turn)
-Such conditions basically mean that column of P which corresponds to
-"exit" state is zero. Multiplication by such P may decrease sum of vector
-components.
+ORDER OF POINTS
-Such models arise when:
-* there is some population of individuals
-* individuals can have different states
-* individuals can transit from one state to another
-* population size is NOT constant - individuals can move into "exit" state
-  and leave population at the next turn, but there are no new individuals
-* amount of individuals which leave population can be predicted
-* you want to model transitions of individuals from one state into another
-  (including transitions into the "exit" state)
+Subroutine automatically sorts points, so caller may pass unsorted array.
-This model is discussed in more details in the ALGLIB User Guide (see
-http://www.alglib.net/dataanalysis/ for more data).
+SETTING CONSTRAINTS - DANGERS AND OPPORTUNITIES:
-INPUT PARAMETERS:
-    N         - problem dimension, N>=2
-    ExitState - index of exit state, in 0..N-1
+Setting constraints can lead to undesired results, like ill-conditioned
+behavior, or inconsistency being detected. From the other side, it allows
+us to improve quality of the fit. Here we summarize our experience with
+constrained regression splines:
+* excessive constraints can be inconsistent. Splines are piecewise cubic
+  functions, and it is easy to create an example, where large number of
+  constraints concentrated in small area will result in inconsistency.
+  Just because spline is not flexible enough to satisfy all of them. And
+  same constraints spread across the [min(x),max(x)] will be perfectly
+  consistent.
+* the more evenly constraints are spread across [min(x),max(x)], the more
+  chances that they will be consistent
+* the greater is M (given fixed constraints), the more chances that
+  constraints will be consistent
+* in the general case, consistency of constraints is NOT GUARANTEED.
+* in several special cases, however, we can guarantee consistency.
+* one of these cases is M>=4 and constraints on the function value
+  (AND/OR its derivative) at the interval boundaries.
+* another special case is M>=4 and ONE constraint on the function value
+  (OR, BUT NOT AND, derivative) anywhere in [min(x),max(x)]
-OUTPUT PARAMETERS:
-    State     - structure stores algorithm state
+Our final recommendation is to use constraints WHEN AND ONLY when you
+can't solve your task without them. Anything beyond special cases given
+above is not guaranteed and may result in inconsistency.
-  -- ALGLIB --
-     Copyright 23.05.2010 by Bochkanov Sergey
+  -- ALGLIB PROJECT --
+     Copyright 18.08.2009 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::mcpdcreateexit(ae_int_t n, ae_int_t exitstate, mcpdstate& s); +
    void alglib::spline1dfithermitewc(
+    real_1d_array x,
+    real_1d_array y,
+    real_1d_array w,
+    real_1d_array xc,
+    real_1d_array yc,
+    integer_1d_array dc,
+    ae_int_t m,
+    ae_int_t& info,
+    spline1dinterpolant& s,
+    spline1dfitreport& rep,
+    const xparams _params = alglib::xdefault);
+void alglib::spline1dfithermitewc(
+    real_1d_array x,
+    real_1d_array y,
+    real_1d_array w,
+    ae_int_t n,
+    real_1d_array xc,
+    real_1d_array yc,
+    integer_1d_array dc,
+    ae_int_t k,
+    ae_int_t m,
+    ae_int_t& info,
+    spline1dinterpolant& s,
+    spline1dfitreport& rep,
+    const xparams _params = alglib::xdefault);
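For reference, a minimal usage sketch of the spline1dfithermitewc() short form declared above (illustrative only, not part of the upstream diff; the data values are invented, and the interpolation.h header and spline1dcalc() helper are assumed to be available as in the surrounding examples):

#include "stdafx.h"
#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Weighted Hermite spline fit with M=4 basis functions (M must be even,
    // see the note above) and one constraint pinning the function value at x=0 to 0.
    real_1d_array x = "[0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]";
    real_1d_array y = "[0.00,0.05,0.26,0.32,0.33,0.43,0.60,0.60,0.77,0.98,1.02]";
    real_1d_array w = "[1,1,1,1,1,1,1,1,1,1,1]";
    real_1d_array xc = "[0.0]";
    real_1d_array yc = "[0.0]";
    integer_1d_array dc = "[0]";   // 0 = constraint on the function value itself
    ae_int_t m = 4;
    ae_int_t info;
    spline1dinterpolant s;
    spline1dfitreport rep;

    spline1dfithermitewc(x, y, w, xc, yc, dc, m, info, s, rep);
    if( info>0 )
        printf("%.2f\n", double(spline1dcalc(s, 0.5)));
    return 0;
}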
    - +
    -
    /************************************************************************* -MCPD results - -INPUT PARAMETERS: - State - algorithm state - -OUTPUT PARAMETERS: - P - array[N,N], transition matrix - Rep - optimization report. You should check Rep.TerminationType - in order to distinguish successful termination from - unsuccessful one. Speaking short, positive values denote - success, negative ones are failures. - More information about fields of this structure can be - found in the comments on MCPDReport datatype. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" +using namespace alglib; - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mcpdresults(mcpdstate s, real_2d_array& p, mcpdreport& rep); -
    -

    Examples:   [1]  [2]  

    - -
    -
    /************************************************************************* -This function is used to add bound constraints on the elements of the -transition matrix P. +int main(int argc, char **argv) +{ + // + // In this example we demonstrate linear fitting by f(x|a) = a*exp(0.5*x). + // + // We have: + // * y - vector of experimental data + // * fmatrix - matrix of basis functions calculated at sample points + // Actually, we have only one basis function F0 = exp(0.5*x). + // + real_2d_array fmatrix = "[[0.606531],[0.670320],[0.740818],[0.818731],[0.904837],[1.000000],[1.105171],[1.221403],[1.349859],[1.491825],[1.648721]]"; + real_1d_array y = "[1.133719, 1.306522, 1.504604, 1.554663, 1.884638, 2.072436, 2.257285, 2.534068, 2.622017, 2.897713, 3.219371]"; + ae_int_t info; + real_1d_array c; + lsfitreport rep; -MCPD solver has four types of constraints which can be placed on P: -* user-specified equality constraints (optional) -* user-specified bound constraints (optional) -* user-specified general linear constraints (optional) -* basic constraints (always present): - * non-negativity: P[i,j]>=0 - * consistency: every column of P sums to 1.0 + // + // Linear fitting without weights + // + lsfitlinear(y, fmatrix, info, c, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + printf("%s\n", c.tostring(4).c_str()); // EXPECTED: [1.98650] -Final constraints which are passed to the underlying optimizer are -calculated as intersection of all present constraints. For example, you -may specify boundary constraint on P[0,0] and equality one: - 0.1<=P[0,0]<=0.9 - P[0,0]=0.5 -Such combination of constraints will be silently reduced to their -intersection, which is P[0,0]=0.5. + // + // Linear fitting with individual weights. + // Slightly different result is returned. + // + real_1d_array w = "[1.414213, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]"; + lsfitlinearw(y, w, fmatrix, info, c, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + printf("%s\n", c.tostring(4).c_str()); // EXPECTED: [1.983354] + return 0; +} -This function can be used to place bound constraints on arbitrary -subset of elements of P. Set of constraints is specified by BndL/BndU -matrices, which may contain arbitrary combination of finite numbers or -infinities (like -INF<x<=0.5 or 0.1<=x<+INF). -You can also use MCPDAddBC() function which allows to ADD bound constraint -for one element of P without changing constraints for other elements. +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "interpolation.h"
     
    -These functions (MCPDSetBC and MCPDAddBC) interact as follows:
    -* there is internal matrix of bound constraints which is stored in the
    -  MCPD solver
    -* MCPDSetBC() replaces this matrix by another one (SET)
    -* MCPDAddBC() modifies one element of this matrix and  leaves  other  ones
    -  unchanged (ADD)
    -* thus  MCPDAddBC()  call  preserves  all  modifications  done by previous
    -  calls,  while  MCPDSetBC()  completely discards all changes  done to the
    -  equality constraints.
    +using namespace alglib;
     
    -INPUT PARAMETERS:
    -    S       -   solver
    -    BndL    -   lower bounds constraints, array[N,N]. Elements of BndL can
    -                be finite numbers or -INF.
    -    BndU    -   upper bounds constraints, array[N,N]. Elements of BndU can
    -                be finite numbers or +INF.
     
    -  -- ALGLIB --
    -     Copyright 23.05.2010 by Bochkanov Sergey
    -*************************************************************************/
    -
    void alglib::mcpdsetbc(
-    mcpdstate s,
-    real_2d_array bndl,
-    real_2d_array bndu);
-
-
    - -
    -
    /************************************************************************* -This function is used to add equality constraints on the elements of the -transition matrix P. - -MCPD solver has four types of constraints which can be placed on P: -* user-specified equality constraints (optional) -* user-specified bound constraints (optional) -* user-specified general linear constraints (optional) -* basic constraints (always present): - * non-negativity: P[i,j]>=0 - * consistency: every column of P sums to 1.0 - -Final constraints which are passed to the underlying optimizer are -calculated as intersection of all present constraints. For example, you -may specify boundary constraint on P[0,0] and equality one: - 0.1<=P[0,0]<=0.9 - P[0,0]=0.5 -Such combination of constraints will be silently reduced to their -intersection, which is P[0,0]=0.5. - -This function can be used to place equality constraints on arbitrary -subset of elements of P. Set of constraints is specified by EC, which may -contain either NAN's or finite numbers from [0,1]. NAN denotes absence of -constraint, finite number denotes equality constraint on specific element -of P. - -You can also use MCPDAddEC() function which allows to ADD equality -constraint for one element of P without changing constraints for other -elements. - -These functions (MCPDSetEC and MCPDAddEC) interact as follows: -* there is internal matrix of equality constraints which is stored in the - MCPD solver -* MCPDSetEC() replaces this matrix by another one (SET) -* MCPDAddEC() modifies one element of this matrix and leaves other ones - unchanged (ADD) -* thus MCPDAddEC() call preserves all modifications done by previous - calls, while MCPDSetEC() completely discards all changes done to the - equality constraints. - -INPUT PARAMETERS: - S - solver - EC - equality constraints, array[N,N]. Elements of EC can be - either NAN's or finite numbers from [0,1]. NAN denotes - absence of constraints, while finite value denotes - equality constraint on the corresponding element of P. +int main(int argc, char **argv) +{ + // + // In this example we demonstrate linear fitting by f(x|a,b) = a*x+b + // with simple constraint f(0)=0. + // + // We have: + // * y - vector of experimental data + // * fmatrix - matrix of basis functions sampled at [0,1] with step 0.2: + // [ 1.0 0.0 ] + // [ 1.0 0.2 ] + // [ 1.0 0.4 ] + // [ 1.0 0.6 ] + // [ 1.0 0.8 ] + // [ 1.0 1.0 ] + // first column contains value of first basis function (constant term) + // second column contains second basis function (linear term) + // * cmatrix - matrix of linear constraints: + // [ 1.0 0.0 0.0 ] + // first two columns contain coefficients before basis functions, + // last column contains desired value of their sum. + // So [1,0,0] means "1*constant_term + 0*linear_term = 0" + // + real_1d_array y = "[0.072436,0.246944,0.491263,0.522300,0.714064,0.921929]"; + real_2d_array fmatrix = "[[1,0.0],[1,0.2],[1,0.4],[1,0.6],[1,0.8],[1,1.0]]"; + real_2d_array cmatrix = "[[1,0,0]]"; + ae_int_t info; + real_1d_array c; + lsfitreport rep; -NOTES: + // + // Constrained fitting without weights + // + lsfitlinearc(y, fmatrix, cmatrix, info, c, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [0,0.932933] -1. infinite values of EC will lead to exception being thrown. Values less -than 0.0 or greater than 1.0 will lead to error code being returned after -call to MCPDSolve(). 
+ // + // Constrained fitting with individual weights + // + real_1d_array w = "[1, 1.414213, 1, 1, 1, 1]"; + lsfitlinearwc(y, w, fmatrix, cmatrix, info, c, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + printf("%s\n", c.tostring(3).c_str()); // EXPECTED: [0,0.938322] + return 0; +} - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mcpdsetec(mcpdstate s, real_2d_array ec); -
    - +
    -
    /************************************************************************* -This function is used to set linear equality/inequality constraints on the -elements of the transition matrix P. - -This function can be used to set one or several general linear constraints -on the elements of P. Two types of constraints are supported: -* equality constraints -* inequality constraints (both less-or-equal and greater-or-equal) - -Coefficients of constraints are specified by matrix C (one of the -parameters). One row of C corresponds to one constraint. Because -transition matrix P has N*N elements, we need N*N columns to store all -coefficients (they are stored row by row), and one more column to store -right part - hence C has N*N+1 columns. Constraint kind is stored in the -CT array. - -Thus, I-th linear constraint is - P[0,0]*C[I,0] + P[0,1]*C[I,1] + .. + P[0,N-1]*C[I,N-1] + - + P[1,0]*C[I,N] + P[1,1]*C[I,N+1] + ... + - + P[N-1,N-1]*C[I,N*N-1] ?=? C[I,N*N] -where ?=? can be either "=" (CT[i]=0), "<=" (CT[i]<0) or ">=" (CT[i]>0). - -Your constraint may involve only some subset of P (less than N*N elements). -For example it can be something like - P[0,0] + P[0,1] = 0.5 -In this case you still should pass matrix with N*N+1 columns, but all its -elements (except for C[0,0], C[0,1] and C[0,N*N-1]) will be zero. - -INPUT PARAMETERS: - S - solver - C - array[K,N*N+1] - coefficients of constraints - (see above for complete description) - CT - array[K] - constraint types - (see above for complete description) - K - number of equality/inequality constraints, K>=0: - * if given, only leading K elements of C/CT are used - * if not given, automatically determined from sizes of C/CT - - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mcpdsetlc(mcpdstate s, real_2d_array c, integer_1d_array ct);
-void alglib::mcpdsetlc(
-    mcpdstate s,
-    real_2d_array c,
-    integer_1d_array ct,
-    ae_int_t k);
+#include "stdafx.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include "interpolation.h"
-
    - -
    -
    /************************************************************************* -This function is used to change prediction weights +using namespace alglib; +void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr) +{ + // this callback calculates f(c,x)=exp(-c0*sqr(x0)) + // where x is a position on X-axis and c is adjustable parameter + func = exp(-c[0]*pow(x[0],2)); +} -MCPD solver scales prediction errors as follows - Error(P) = ||W*(y-P*x)||^2 -where - x is a system state at time t - y is a system state at time t+1 - P is a transition matrix - W is a diagonal scaling matrix +int main(int argc, char **argv) +{ + // + // In this example we demonstrate exponential fitting + // by f(x) = exp(-c*x^2) + // using function value only. + // + // Gradient is estimated using combination of numerical differences + // and secant updates. diffstep variable stores differentiation step + // (we have to tell algorithm what step to use). + // + real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]"; + real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]"; + real_1d_array c = "[0.3]"; + double epsx = 0.000001; + ae_int_t maxits = 0; + ae_int_t info; + lsfitstate state; + lsfitreport rep; + double diffstep = 0.0001; -By default, weights are chosen in order to minimize relative prediction -error instead of absolute one. For example, if one component of state is -about 0.5 in magnitude and another one is about 0.05, then algorithm will -make corresponding weights equal to 2.0 and 20.0. + // + // Fitting without weights + // + lsfitcreatef(x, y, c, diffstep, state); + lsfitsetcond(state, epsx, maxits); + alglib::lsfitfit(state, function_cx_1_func); + lsfitresults(state, info, c, rep); + printf("%d\n", int(info)); // EXPECTED: 2 + printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5] -INPUT PARAMETERS: - S - solver - PW - array[N], weights: - * must be non-negative values (exception will be thrown otherwise) - * zero values will be replaced by automatically chosen values + // + // Fitting with weights + // (you can change weights and see how it changes result) + // + real_1d_array w = "[1,1,1,1,1,1,1,1,1,1,1]"; + lsfitcreatewf(x, y, w, c, diffstep, state); + lsfitsetcond(state, epsx, maxits); + alglib::lsfitfit(state, function_cx_1_func); + lsfitresults(state, info, c, rep); + printf("%d\n", int(info)); // EXPECTED: 2 + printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5] + return 0; +} - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mcpdsetpredictionweights(mcpdstate s, real_1d_array pw); -
    - +
    -
    /************************************************************************* -This function allows to set prior values used for regularization of your -problem. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -By default, regularizing term is equal to r*||P-prior_P||^2, where r is a -small non-zero value, P is transition matrix, prior_P is identity matrix, -||X||^2 is a sum of squared elements of X. +using namespace alglib; +void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr) +{ + // this callback calculates f(c,x)=exp(-c0*sqr(x0)) + // where x is a position on X-axis and c is adjustable parameter + func = exp(-c[0]*pow(x[0],2)); +} -This function allows you to change prior values prior_P. You can also -change r with MCPDSetTikhonovRegularizer() function. +int main(int argc, char **argv) +{ + // + // In this example we demonstrate exponential fitting by + // f(x) = exp(-c*x^2) + // subject to bound constraints + // 0.0 <= c <= 1.0 + // using function value only. + // + // Gradient is estimated using combination of numerical differences + // and secant updates. diffstep variable stores differentiation step + // (we have to tell algorithm what step to use). + // + // Unconstrained solution is c=1.5, but because of constraints we should + // get c=1.0 (at the boundary). + // + real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]"; + real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]"; + real_1d_array c = "[0.3]"; + real_1d_array bndl = "[0.0]"; + real_1d_array bndu = "[1.0]"; + double epsx = 0.000001; + ae_int_t maxits = 0; + ae_int_t info; + lsfitstate state; + lsfitreport rep; + double diffstep = 0.0001; -INPUT PARAMETERS: - S - solver - PP - array[N,N], matrix of prior values: - 1. elements must be real numbers from [0,1] - 2. columns must sum to 1.0. - First property is checked (exception is thrown otherwise), - while second one is not checked/enforced. + lsfitcreatef(x, y, c, diffstep, state); + lsfitsetbc(state, bndl, bndu); + lsfitsetcond(state, epsx, maxits); + alglib::lsfitfit(state, function_cx_1_func); + lsfitresults(state, info, c, rep); + printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.0] + return 0; +} - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mcpdsetprior(mcpdstate s, real_2d_array pp); -
    - +
    -
    /************************************************************************* -This function allows to tune amount of Tikhonov regularization being -applied to your problem. - -By default, regularizing term is equal to r*||P-prior_P||^2, where r is a -small non-zero value, P is transition matrix, prior_P is identity matrix, -||X||^2 is a sum of squared elements of X. - -This function allows you to change coefficient r. You can also change -prior values with MCPDSetPrior() function. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -INPUT PARAMETERS: - S - solver - V - regularization coefficient, finite non-negative value. It - is not recommended to specify zero value unless you are - pretty sure that you want it. +using namespace alglib; +void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr) +{ + // this callback calculates f(c,x)=exp(-c0*sqr(x0)) + // where x is a position on X-axis and c is adjustable parameter + func = exp(-c[0]*pow(x[0],2)); +} +void function_cx_1_grad(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) +{ + // this callback calculates f(c,x)=exp(-c0*sqr(x0)) and gradient G={df/dc[i]} + // where x is a position on X-axis and c is adjustable parameter. + // IMPORTANT: gradient is calculated with respect to C, not to X + func = exp(-c[0]*pow(x[0],2)); + grad[0] = -pow(x[0],2)*func; +} - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mcpdsettikhonovregularizer(mcpdstate s, double v); +int main(int argc, char **argv) +{ + // + // In this example we demonstrate exponential fitting + // by f(x) = exp(-c*x^2) + // using function value and gradient (with respect to c). + // + real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]"; + real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]"; + real_1d_array c = "[0.3]"; + double epsx = 0.000001; + ae_int_t maxits = 0; + ae_int_t info; + lsfitstate state; + lsfitreport rep; -
    - -
    -
    /************************************************************************* -This function is used to start solution of the MCPD problem. + // + // Fitting without weights + // + lsfitcreatefg(x, y, c, true, state); + lsfitsetcond(state, epsx, maxits); + alglib::lsfitfit(state, function_cx_1_func, function_cx_1_grad); + lsfitresults(state, info, c, rep); + printf("%d\n", int(info)); // EXPECTED: 2 + printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5] -After return from this function, you can use MCPDResults() to get solution -and completion code. + // + // Fitting with weights + // (you can change weights and see how it changes result) + // + real_1d_array w = "[1,1,1,1,1,1,1,1,1,1,1]"; + lsfitcreatewfg(x, y, w, c, true, state); + lsfitsetcond(state, epsx, maxits); + alglib::lsfitfit(state, function_cx_1_func, function_cx_1_grad); + lsfitresults(state, info, c, rep); + printf("%d\n", int(info)); // EXPECTED: 2 + printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5] + return 0; +} - -- ALGLIB -- - Copyright 23.05.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mcpdsolve(mcpdstate s); -
    -

    Examples:   [1]  [2]  

    - +
     #include "stdafx.h"
     #include <stdlib.h>
     #include <stdio.h>
     #include <math.h>
    -#include "dataanalysis.h"
    +#include "interpolation.h"
     
     using namespace alglib;
    -
    +void function_cx_1_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr) 
    +{
    +    // this callback calculates f(c,x)=exp(-c0*sqr(x0))
    +    // where x is a position on X-axis and c is adjustable parameter
    +    func = exp(-c[0]*pow(x[0],2));
    +}
    +void function_cx_1_grad(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
    +{
    +    // this callback calculates f(c,x)=exp(-c0*sqr(x0)) and gradient G={df/dc[i]}
    +    // where x is a position on X-axis and c is adjustable parameter.
    +    // IMPORTANT: gradient is calculated with respect to C, not to X
    +    func = exp(-c[0]*pow(x[0],2));
    +    grad[0] = -pow(x[0],2)*func;
    +}
    +void function_cx_1_hess(const real_1d_array &c, const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr) 
    +{
    +    // this callback calculates f(c,x)=exp(-c0*sqr(x0)), gradient G={df/dc[i]} and Hessian H={d2f/(dc[i]*dc[j])}
    +    // where x is a position on X-axis and c is adjustable parameter.
    +    // IMPORTANT: gradient/Hessian are calculated with respect to C, not to X
    +    func = exp(-c[0]*pow(x[0],2));
    +    grad[0] = -pow(x[0],2)*func;
    +    hess[0][0] = pow(x[0],4)*func;
    +}
     
     int main(int argc, char **argv)
     {
         //
    -    // The very simple MCPD example
    -    //
    -    // We have a loan portfolio. Our loans can be in one of two states:
    -    // * normal loans ("good" ones)
    -    // * past due loans ("bad" ones)
    -    //
    -    // We assume that:
    -    // * loans can transition from any state to any other state. In 
    -    //   particular, past due loan can become "good" one at any moment 
    -    //   with same (fixed) probability. Not realistic, but it is toy example :)
    -    // * portfolio size does not change over time
    +    // In this example we demonstrate exponential fitting
    +    // by f(x) = exp(-c*x^2)
    +    // using function value, gradient and Hessian (with respect to c)
         //
    -    // Thus, we have following model
    -    //     state_new = P*state_old
    -    // where
    -    //         ( p00  p01 )
    -    //     P = (          )
    -    //         ( p10  p11 )
    +    real_2d_array x = "[[-1],[-0.8],[-0.6],[-0.4],[-0.2],[0],[0.2],[0.4],[0.6],[0.8],[1.0]]";
    +    real_1d_array y = "[0.223130, 0.382893, 0.582748, 0.786628, 0.941765, 1.000000, 0.941765, 0.786628, 0.582748, 0.382893, 0.223130]";
    +    real_1d_array c = "[0.3]";
    +    double epsx = 0.000001;
    +    ae_int_t maxits = 0;
    +    ae_int_t info;
    +    lsfitstate state;
    +    lsfitreport rep;
    +
         //
    -    // We want to model transitions between these two states using MCPD
    -    // approach (Markov Chains for Proportional/Population Data), i.e.
    -    // to restore hidden transition matrix P using actual portfolio data.
    -    // We have:
    -    // * poportional data, i.e. proportion of loans in the normal and past 
    -    //   due states (not portfolio size measured in some currency, although 
    -    //   it is possible to work with population data too)
    -    // * two tracks, i.e. two sequences which describe portfolio
    -    //   evolution from two different starting states: [1,0] (all loans 
    -    //   are "good") and [0.8,0.2] (only 80% of portfolio is in the "good"
    -    //   state)
    +    // Fitting without weights
         //
    -    mcpdstate s;
    -    mcpdreport rep;
    -    real_2d_array p;
    -    real_2d_array track0 = "[[1.00000,0.00000],[0.95000,0.05000],[0.92750,0.07250],[0.91738,0.08263],[0.91282,0.08718]]";
    -    real_2d_array track1 = "[[0.80000,0.20000],[0.86000,0.14000],[0.88700,0.11300],[0.89915,0.10085]]";
    -
    -    mcpdcreate(2, s);
    -    mcpdaddtrack(s, track0);
    -    mcpdaddtrack(s, track1);
    -    mcpdsolve(s);
    -    mcpdresults(s, p, rep);
    +    lsfitcreatefgh(x, y, c, state);
    +    lsfitsetcond(state, epsx, maxits);
    +    alglib::lsfitfit(state, function_cx_1_func, function_cx_1_grad, function_cx_1_hess);
    +    lsfitresults(state, info, c, rep);
    +    printf("%d\n", int(info)); // EXPECTED: 2
    +    printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5]
     
         //
    -    // Hidden matrix P is equal to
    -    //         ( 0.95  0.50 )
    -    //         (            )
    -    //         ( 0.05  0.50 )
    -    // which means that "good" loans can become "bad" with 5% probability, 
    -    // while "bad" loans will return to good state with 50% probability.
    +    // Fitting with weights
    +    // (you can change weights and see how it changes result)
         //
    -    printf("%s\n", p.tostring(2).c_str()); // EXPECTED: [[0.95,0.50],[0.05,0.50]]
    +    real_1d_array w = "[1,1,1,1,1,1,1,1,1,1,1]";
    +    lsfitcreatewfgh(x, y, w, c, state);
    +    lsfitsetcond(state, epsx, maxits);
    +    alglib::lsfitfit(state, function_cx_1_func, function_cx_1_grad, function_cx_1_hess);
    +    lsfitresults(state, info, c, rep);
    +    printf("%d\n", int(info)); // EXPECTED: 2
    +    printf("%s\n", c.tostring(1).c_str()); // EXPECTED: [1.5]
         return 0;
     }
     
     
    -
    +
     #include "stdafx.h"
     #include <stdlib.h>
     #include <stdio.h>
     #include <math.h>
    -#include "dataanalysis.h"
    +#include "interpolation.h"
     
     using namespace alglib;
    -
    +void function_debt_func(const real_1d_array &c, const real_1d_array &x, double &func, void *ptr) 
    +{
    +    //
    +    // this callback calculates f(c,x)=c[0]*(1+c[1]*(pow(x[0]-1999,c[2])-1))
    +    //
    +    func = c[0]*(1+c[1]*(pow(x[0]-1999,c[2])-1));
    +}
     
     int main(int argc, char **argv)
     {
         //
    -    // Simple MCPD example
    +    // In this example we demonstrate fitting by
    +    //     f(x) = c[0]*(1+c[1]*((x-1999)^c[2]-1))
    +    // subject to bound constraints
    +    //     -INF  < c[0] < +INF
    +    //      -10 <= c[1] <= +10
    +    //      0.1 <= c[2] <= 2.0
    +    // Data we want to fit are time series of Japan national debt
    +    // collected from 2000 to 2008 measured in USD (dollars, not
    +    // millions of dollars).
         //
    -    // We have a loan portfolio. Our loans can be in one of three states:
    -    // * normal loans
    -    // * past due loans
    -    // * charged off loans
    +    // Our variables are:
    +    //     c[0] - debt value at initial moment (2000),
    +    //     c[1] - direction coefficient (growth or decrease),
    +    //     c[2] - curvature coefficient.
+    // You may see that our variables are badly scaled: the first one
+    // is of order 10^12, while the next two are about 1 in
+    // magnitude. Such a problem is difficult to solve without some
+    // kind of scaling.
    +    // That is exactly where lsfitsetscale() function can be used.
    +    // We set scale of our variables to [1.0E12, 1, 1], which allows
    +    // us to easily solve this problem.
         //
    -    // We assume that:
    -    // * normal loan can stay normal or become past due (but not charged off)
    -    // * past due loan can stay past due, become normal or charged off
    -    // * charged off loan will stay charged off for the rest of eternity
    -    // * portfolio size does not change over time
    -    // Not realistic, but it is toy example :)
+    // You can try commenting out the lsfitsetscale() call - and you will
+    // see that the algorithm fails to converge.
         //
    -    // Thus, we have following model
    -    //     state_new = P*state_old
    -    // where
    -    //         ( p00  p01    )
    -    //     P = ( p10  p11    )
    -    //         (      p21  1 )
    -    // i.e. four elements of P are known a priori.
    +    real_2d_array x = "[[2000],[2001],[2002],[2003],[2004],[2005],[2006],[2007],[2008]]";
    +    real_1d_array y = "[4323239600000.0, 4560913100000.0, 5564091500000.0, 6743189300000.0, 7284064600000.0, 7050129600000.0, 7092221500000.0, 8483907600000.0, 8625804400000.0]";
    +    real_1d_array c = "[1.0e+13, 1, 1]";
    +    double epsx = 1.0e-5;
    +    real_1d_array bndl = "[-inf, -10, 0.1]";
    +    real_1d_array bndu = "[+inf, +10, 2.0]";
    +    real_1d_array s = "[1.0e+12, 1, 1]";
    +    ae_int_t maxits = 0;
    +    ae_int_t info;
    +    lsfitstate state;
    +    lsfitreport rep;
    +    double diffstep = 1.0e-5;
    +
    +    lsfitcreatef(x, y, c, diffstep, state);
    +    lsfitsetcond(state, epsx, maxits);
    +    lsfitsetbc(state, bndl, bndu);
    +    lsfitsetscale(state, s);
    +    alglib::lsfitfit(state, function_debt_func);
    +    lsfitresults(state, info, c, rep);
    +    printf("%d\n", int(info)); // EXPECTED: 2
    +    printf("%s\n", c.tostring(-2).c_str()); // EXPECTED: [4.142560E+12, 0.434240, 0.565376]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "interpolation.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
         //
    -    // Although it is possible (given enough data) to In order to enforce 
    -    // this property we set equality constraints on these elements.
    +    // This example demonstrates polynomial fitting.
    +    //
    +    // Fitting is done by two (M=2) functions from polynomial basis:
    +    //     f0 = 1
    +    //     f1 = x
+    // Basically, it is just a linear fit; more complex polynomials may be used
+    // (e.g. parabolas with M=3, cubics with M=4), but even such a simple fit allows
+    // us to demonstrate the polynomialfit() function in action.
         //
    -    // We want to model transitions between these two states using MCPD
    -    // approach (Markov Chains for Proportional/Population Data), i.e.
    -    // to restore hidden transition matrix P using actual portfolio data.
         // We have:
    -    // * poportional data, i.e. proportion of loans in the current and past 
    -    //   due states (not portfolio size measured in some currency, although 
    -    //   it is possible to work with population data too)
    -    // * two tracks, i.e. two sequences which describe portfolio
    -    //   evolution from two different starting states: [1,0,0] (all loans 
    -    //   are "good") and [0.8,0.2,0.0] (only 80% of portfolio is in the "good"
    -    //   state)
    +    // * x      set of abscissas
    +    // * y      experimental data
         //
    -    mcpdstate s;
    -    mcpdreport rep;
    -    real_2d_array p;
    -    real_2d_array track0 = "[[1.000000,0.000000,0.000000],[0.950000,0.050000,0.000000],[0.927500,0.060000,0.012500],[0.911125,0.061375,0.027500],[0.896256,0.060900,0.042844]]";
    -    real_2d_array track1 = "[[0.800000,0.200000,0.000000],[0.860000,0.090000,0.050000],[0.862000,0.065500,0.072500],[0.851650,0.059475,0.088875],[0.838805,0.057451,0.103744]]";
    +    // Additionally we demonstrate weighted fitting, where second point has
    +    // more weight than other ones.
    +    //
    +    real_1d_array x = "[0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]";
    +    real_1d_array y = "[0.00,0.05,0.26,0.32,0.33,0.43,0.60,0.60,0.77,0.98,1.02]";
    +    ae_int_t m = 2;
    +    double t = 2;
    +    ae_int_t info;
    +    barycentricinterpolant p;
    +    polynomialfitreport rep;
    +    double v;
     
    -    mcpdcreate(3, s);
    -    mcpdaddtrack(s, track0);
    -    mcpdaddtrack(s, track1);
    -    mcpdaddec(s, 0, 2, 0.0);
    -    mcpdaddec(s, 1, 2, 0.0);
    -    mcpdaddec(s, 2, 2, 1.0);
    -    mcpdaddec(s, 2, 0, 0.0);
    -    mcpdsolve(s);
    -    mcpdresults(s, p, rep);
    +    //
    +    // Fitting without individual weights
    +    //
    +    // NOTE: result is returned as barycentricinterpolant structure.
    +    //       if you want to get representation in the power basis,
    +    //       you can use barycentricbar2pow() function to convert
    +    //       from barycentric to power representation (see docs for 
    +    //       POLINT subpackage for more info).
    +    //
    +    polynomialfit(x, y, m, info, p, rep);
    +    v = barycentriccalc(p, t);
    +    printf("%.2f\n", double(v)); // EXPECTED: 2.011
     
         //
    -    // Hidden matrix P is equal to
    -    //         ( 0.95 0.50      )
    -    //         ( 0.05 0.25      )
    -    //         (      0.25 1.00 ) 
    -    // which means that "good" loans can become past due with 5% probability, 
    -    // while past due loans will become charged off with 25% probability or
    -    // return back to normal state with 50% probability.
    +    // Fitting with individual weights
         //
    -    printf("%s\n", p.tostring(2).c_str()); // EXPECTED: [[0.95,0.50,0.00],[0.05,0.25,0.00],[0.00,0.25,1.00]]
    +    // NOTE: slightly different result is returned
    +    //
    +    real_1d_array w = "[1,1.414213562,1,1,1,1,1,1,1,1,1]";
    +    real_1d_array xc = "[]";
    +    real_1d_array yc = "[]";
    +    integer_1d_array dc = "[]";
    +    polynomialfitwc(x, y, w, xc, yc, dc, m, info, p, rep);
    +    v = barycentriccalc(p, t);
    +    printf("%.2f\n", double(v)); // EXPECTED: 2.023
         return 0;
     }
     
     
    -
    -
    - -minbleicreport
    -minbleicstate
    - -minbleiccreate
    -minbleiccreatef
    -minbleicoptimize
    -minbleicrequesttermination
    -minbleicrestartfrom
    -minbleicresults
    -minbleicresultsbuf
    -minbleicsetbc
    -minbleicsetcond
    -minbleicsetgradientcheck
    -minbleicsetlc
    -minbleicsetprecdefault
    -minbleicsetprecdiag
    -minbleicsetprecscale
    -minbleicsetscale
    -minbleicsetstpmax
    -minbleicsetxrep
    - - - - - - -
    minbleic_d_1 Nonlinear optimization with bound constraints
    minbleic_d_2 Nonlinear optimization with linear inequality constraints
    minbleic_ftrim Nonlinear optimization by BLEIC, function with singularities
    minbleic_numdiff Nonlinear optimization with bound constraints and numerical differentiation
    - +
    -
    /************************************************************************* -This structure stores optimization report: -* IterationsCount number of iterations -* NFEV number of gradient evaluations -* TerminationType termination type (see below) - -TERMINATION CODES +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -TerminationType field contains completion code, which can be: - -8 internal integrity control detected infinite or NAN values in - function/gradient. Abnormal termination signalled. - -7 gradient verification failed. - See MinBLEICSetGradientCheck() for more information. - -3 inconsistent constraints. Feasible point is - either nonexistent or too hard to find. Try to - restart optimizer with better initial approximation - 1 relative function improvement is no more than EpsF. - 2 relative step is no more than EpsX. - 4 gradient norm is no more than EpsG - 5 MaxIts steps was taken - 7 stopping conditions are too stringent, - further improvement is impossible, - X contains best point found so far. - 8 terminated by user who called minbleicrequesttermination(). X contains - point which was "current accepted" when termination request was - submitted. +using namespace alglib; -ADDITIONAL FIELDS -There are additional fields which can be used for debugging: -* DebugEqErr error in the equality constraints (2-norm) -* DebugFS f, calculated at projection of initial point - to the feasible set -* DebugFF f, calculated at the final point -* DebugDX |X_start-X_final| -*************************************************************************/ -
    class minbleicreport +int main(int argc, char **argv) { - ae_int_t iterationscount; - ae_int_t nfev; - ae_int_t varidx; - ae_int_t terminationtype; - double debugeqerr; - double debugfs; - double debugff; - double debugdx; - ae_int_t debugfeasqpits; - ae_int_t debugfeasgpaits; - ae_int_t inneriterationscount; - ae_int_t outeriterationscount; -}; + // + // This example demonstrates polynomial fitting. + // + // Fitting is done by two (M=2) functions from polynomial basis: + // f0 = 1 + // f1 = x + // with simple constraint on function value + // f(0) = 0 + // Basically, it just a linear fit; more complex polynomials may be used + // (e.g. parabolas with M=3, cubic with M=4), but even such simple fit allows + // us to demonstrate polynomialfit() function in action. + // + // We have: + // * x set of abscissas + // * y experimental data + // * xc points where constraints are placed + // * yc constraints on derivatives + // * dc derivative indices + // (0 means function itself, 1 means first derivative) + // + real_1d_array x = "[1.0,1.0]"; + real_1d_array y = "[0.9,1.1]"; + real_1d_array w = "[1,1]"; + real_1d_array xc = "[0]"; + real_1d_array yc = "[0]"; + integer_1d_array dc = "[0]"; + double t = 2; + ae_int_t m = 2; + ae_int_t info; + barycentricinterpolant p; + polynomialfitreport rep; + double v; -
    - -
    -
    /************************************************************************* -This object stores nonlinear optimizer state. -You should use functions provided by MinBLEIC subpackage to work with this -object -*************************************************************************/ -
    class minbleicstate
-{
-};
+    polynomialfitwc(x, y, w, xc, yc, dc, m, info, p, rep);
+    v = barycentriccalc(p, t);
+    printf("%.2f\n", double(v)); // EXPECTED: 2.000
+    return 0;
+}
-
    - + +
    -
    /************************************************************************* - BOUND CONSTRAINED OPTIMIZATION - WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -DESCRIPTION: -The subroutine minimizes function F(x) of N arguments subject to any -combination of: -* bound constraints -* linear inequality constraints -* linear equality constraints +using namespace alglib; -REQUIREMENTS: -* user must provide function value and gradient -* starting point X0 must be feasible or - not too far away from the feasible set -* grad(f) must be Lipschitz continuous on a level set: - L = { x : f(x)<=f(x0) } -* function must be defined everywhere on the feasible set F -USAGE: +int main(int argc, char **argv) +{ + // + // In this example we demonstrate penalized spline fitting of noisy data + // + // We have: + // * x - abscissas + // * y - vector of experimental data, straight line with small noise + // + real_1d_array x = "[0.00,0.10,0.20,0.30,0.40,0.50,0.60,0.70,0.80,0.90]"; + real_1d_array y = "[0.10,0.00,0.30,0.40,0.30,0.40,0.62,0.68,0.75,0.95]"; + ae_int_t info; + double v; + spline1dinterpolant s; + spline1dfitreport rep; + double rho; -Constrained optimization if far more complex than the unconstrained one. -Here we give very brief outline of the BLEIC optimizer. We strongly recommend -you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide -on optimization, which is available at http://www.alglib.net/optimization/ + // + // Fit with VERY small amount of smoothing (rho = -5.0) + // and large number of basis functions (M=50). + // + // With such small regularization penalized spline almost fully reproduces function values + // + rho = -5.0; + spline1dfitpenalized(x, y, 50, rho, info, s, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + v = spline1dcalc(s, 0.0); + printf("%.1f\n", double(v)); // EXPECTED: 0.10 -1. User initializes algorithm state with MinBLEICCreate() call + // + // Fit with VERY large amount of smoothing (rho = 10.0) + // and large number of basis functions (M=50). + // + // With such regularization our spline should become close to the straight line fit. + // We will compare its value in x=1.0 with results obtained from such fit. + // + rho = +10.0; + spline1dfitpenalized(x, y, 50, rho, info, s, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + v = spline1dcalc(s, 1.0); + printf("%.2f\n", double(v)); // EXPECTED: 0.969 -2. USer adds boundary and/or linear constraints by calling - MinBLEICSetBC() and MinBLEICSetLC() functions. + // + // In real life applications you may need some moderate degree of fitting, + // so we try to fit once more with rho=3.0. + // + rho = +3.0; + spline1dfitpenalized(x, y, 50, rho, info, s, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + return 0; +} -3. User sets stopping conditions with MinBLEICSetCond(). -4. User calls MinBLEICOptimize() function which takes algorithm state and - pointer (delegate, etc.) to callback function which calculates F/G. +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "interpolation.h"
     
    -5. User calls MinBLEICResults() to get solution
    +using namespace alglib;
     
    -6. Optionally user may call MinBLEICRestartFrom() to solve another problem
    -   with same N but another starting point.
    -   MinBLEICRestartFrom() allows to reuse already initialized structure.
     
    +int main(int argc, char **argv)
    +{
    +    real_1d_array x = "[1,2,3,4,5,6,7,8]";
    +    real_1d_array y = "[0.06313223,0.44552624,0.61838364,0.71385108,0.77345838,0.81383140,0.84280033,0.86449822]";
    +    ae_int_t n = 8;
    +    double a;
    +    double b;
    +    double c;
    +    double d;
    +    lsfitreport rep;
     
    -INPUT PARAMETERS:
    -    N       -   problem dimension, N>0:
    -                * if given, only leading N elements of X are used
    -                * if not given, automatically determined from size ofX
    -    X       -   starting point, array[N]:
    -                * it is better to set X to a feasible point
    -                * but X can be infeasible, in which case algorithm will try
    -                  to find feasible point first, using X as initial
    -                  approximation.
    +    //
    +    // Test logisticfit4() on carefully designed data with a priori known answer.
    +    //
    +    logisticfit4(x, y, n, a, b, c, d, rep);
    +    printf("%.1f\n", double(a)); // EXPECTED: -1.000
    +    printf("%.1f\n", double(b)); // EXPECTED: 1.200
    +    printf("%.1f\n", double(c)); // EXPECTED: 0.900
    +    printf("%.1f\n", double(d)); // EXPECTED: 1.000
     
    -OUTPUT PARAMETERS:
    -    State   -   structure stores algorithm state
    +    //
    +    // Evaluate model at point x=0.5
    +    //
    +    double v;
    +    v = logisticcalc4(0.5, a, b, c, d);
    +    printf("%.2f\n", double(v)); // EXPECTED: -0.33874308
    +    return 0;
    +}
     
    -  -- ALGLIB --
    -     Copyright 28.11.2010 by Bochkanov Sergey
    -*************************************************************************/
    -
    void alglib::minbleiccreate(real_1d_array x, minbleicstate& state);
-void alglib::minbleiccreate(
-    ae_int_t n,
-    real_1d_array x,
-    minbleicstate& state);
-
    -

    Examples:   [1]  [2]  [3]  

    - +
    -
    /************************************************************************* -The subroutine is finite difference variant of MinBLEICCreate(). It uses -finite differences in order to differentiate target function. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -Description below contains information which is specific to this function -only. We recommend to read comments on MinBLEICCreate() in order to get -more information about creation of BLEIC optimizer. +using namespace alglib; -INPUT PARAMETERS: - N - problem dimension, N>0: - * if given, only leading N elements of X are used - * if not given, automatically determined from size of X - X - starting point, array[0..N-1]. - DiffStep- differentiation step, >0 -OUTPUT PARAMETERS: - State - structure which stores algorithm state +int main(int argc, char **argv) +{ + real_1d_array x = "[1,2,3,4,5,6,7,8]"; + real_1d_array y = "[0.1949776139,0.5710060208,0.726002637,0.8060434158,0.8534547965,0.8842071579,0.9054773317,0.9209088299]"; + ae_int_t n = 8; + double a; + double b; + double c; + double d; + double g; + lsfitreport rep; -NOTES: -1. algorithm uses 4-point central formula for differentiation. -2. differentiation step along I-th axis is equal to DiffStep*S[I] where - S[] is scaling vector which can be set by MinBLEICSetScale() call. -3. we recommend you to use moderate values of differentiation step. Too - large step will result in too large truncation errors, while too small - step will result in too large numerical errors. 1.0E-6 can be good - value to start with. -4. Numerical differentiation is very inefficient - one gradient - calculation needs 4*N function evaluations. This function will work for - any N - either small (1...10), moderate (10...100) or large (100...). - However, performance penalty will be too severe for any N's except for - small ones. - We should also say that code which relies on numerical differentiation - is less robust and precise. CG needs exact gradient values. Imprecise - gradient may slow down convergence, especially on highly nonlinear - problems. - Thus we recommend to use this function for fast prototyping on small- - dimensional problems only, and to implement analytical gradient as soon - as possible. + // + // Test logisticfit5() on carefully designed data with a priori known answer. + // + logisticfit5(x, y, n, a, b, c, d, g, rep); + printf("%.1f\n", double(a)); // EXPECTED: -1.000 + printf("%.1f\n", double(b)); // EXPECTED: 1.200 + printf("%.1f\n", double(c)); // EXPECTED: 0.900 + printf("%.1f\n", double(d)); // EXPECTED: 1.000 + printf("%.1f\n", double(g)); // EXPECTED: 1.200 - -- ALGLIB -- - Copyright 16.05.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::minbleiccreatef(
-    real_1d_array x,
-    double diffstep,
-    minbleicstate& state);
-void alglib::minbleiccreatef(
-    ae_int_t n,
-    real_1d_array x,
-    double diffstep,
-    minbleicstate& state);
+    //
+    // Evaluate model at point x=0.5
+    //
+    double v;
+    v = logisticcalc5(0.5, a, b, c, d, g);
+    printf("%.2f\n", double(v)); // EXPECTED: -0.2354656824
+    return 0;
+}
-
    -

    Examples:   [1]  

    - + + +
    + +mannwhitneyutest
    + + +
    +
     
    /************************************************************************* -This family of functions is used to launcn iterations of nonlinear optimizer +Mann-Whitney U-test -These functions accept following parameters: - state - algorithm state - func - callback which calculates function (or merit function) - value func at given point x - grad - callback which calculates function (or merit function) - value func and gradient grad at given point x - rep - optional callback which is called after each iteration - can be NULL - ptr - optional pointer which is passed to func/grad/hess/jac/rep - can be NULL - -NOTES: - -1. This function has two different implementations: one which uses exact - (analytical) user-supplied gradient, and one which uses function value - only and numerically differentiates function in order to obtain - gradient. +This test checks hypotheses about whether X and Y are samples of two +continuous distributions of the same shape and same median or whether +their medians are different. - Depending on the specific function used to create optimizer object - (either MinBLEICCreate() for analytical gradient or MinBLEICCreateF() - for numerical differentiation) you should choose appropriate variant of - MinBLEICOptimize() - one which accepts function AND gradient or one - which accepts function ONLY. +The following tests are performed: + * two-tailed test (null hypothesis - the medians are equal) + * left-tailed test (null hypothesis - the median of the first sample + is greater than or equal to the median of the second sample) + * right-tailed test (null hypothesis - the median of the first sample + is less than or equal to the median of the second sample). - Be careful to choose variant of MinBLEICOptimize() which corresponds to - your optimization scheme! Table below lists different combinations of - callback (function/gradient) passed to MinBLEICOptimize() and specific - function used to create optimizer. +Requirements: + * the samples are independent + * X and Y are continuous distributions (or discrete distributions well- + approximating continuous distributions) + * distributions of X and Y have the same shape. The only possible + difference is their position (i.e. the value of the median) + * the number of elements in each sample is not less than 5 + * the scale of measurement should be ordinal, interval or ratio (i.e. + the test could not be applied to nominal variables). +The test is non-parametric and doesn't require distributions to be normal. - | USER PASSED TO MinBLEICOptimize() - CREATED WITH | function only | function and gradient - ------------------------------------------------------------ - MinBLEICCreateF() | work FAIL - MinBLEICCreate() | FAIL work +Input parameters: + X - sample 1. Array whose index goes from 0 to N-1. + N - size of the sample. N>=5 + Y - sample 2. Array whose index goes from 0 to M-1. + M - size of the sample. M>=5 - Here "FAIL" denotes inappropriate combinations of optimizer creation - function and MinBLEICOptimize() version. Attemps to use such - combination (for example, to create optimizer with MinBLEICCreateF() - and to pass gradient information to MinCGOptimize()) will lead to - exception being thrown. Either you did not pass gradient when it WAS - needed or you passed gradient when it was NOT needed. +Output parameters: + BothTails - p-value for two-tailed test. + If BothTails is less than the given significance level + the null hypothesis is rejected. + LeftTail - p-value for left-tailed test. 
+ If LeftTail is less than the given significance level, + the null hypothesis is rejected. + RightTail - p-value for right-tailed test. + If RightTail is less than the given significance level + the null hypothesis is rejected. - -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey -*************************************************************************/ -
    void minbleicoptimize(minbleicstate &state,
-    void (*func)(const real_1d_array &x, double &func, void *ptr),
-    void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
-    void *ptr = NULL);
-void minbleicoptimize(minbleicstate &state,
-    void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
-    void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
-    void *ptr = NULL);
-
    -

    Examples:   [1]  [2]  [3]  [4]  

    - -
    -
    /************************************************************************* -This subroutine submits request for termination of running optimizer. It -should be called from user-supplied callback when user decides that it is -time to "smoothly" terminate optimization process. As result, optimizer -stops at point which was "current accepted" when termination request was -submitted and returns error code 8 (successful termination). +To calculate p-values, special approximation is used. This method lets us +calculate p-values with satisfactory accuracy in interval [0.0001, 1]. +There is no approximation outside the [0.0001, 1] interval. Therefore, if +the significance level outlies this interval, the test returns 0.0001. -INPUT PARAMETERS: - State - optimizer structure +Relative precision of approximation of p-value: -NOTE: after request for termination optimizer may perform several - additional calls to user-supplied callbacks. It does NOT guarantee - to stop immediately - it just guarantees that these additional calls - will be discarded later. +N M Max.err. Rms.err. +5..10 N..10 1.4e-02 6.0e-04 +5..10 N..100 2.2e-02 5.3e-06 +10..15 N..15 1.0e-02 3.2e-04 +10..15 N..100 1.0e-02 2.2e-05 +15..100 N..100 6.1e-03 2.7e-06 -NOTE: calling this function on optimizer which is NOT running will have no - effect. +For N,M>100 accuracy checks weren't put into practice, but taking into +account characteristics of asymptotic approximation used, precision should +not be sharply different from the values for interval [5, 100]. -NOTE: multiple calls to this function are possible. First call is counted, - subsequent calls are silently ignored. +NOTE: P-value approximation was optimized for 0.0001<=p<=0.2500. Thus, + P's outside of this interval are enforced to these bounds. Say, you + may quite often get P equal to exactly 0.25 or 0.0001. -- ALGLIB -- - Copyright 08.10.2014 by Bochkanov Sergey + Copyright 09.04.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicrequesttermination(minbleicstate state); +
    void alglib::mannwhitneyutest(
+    real_1d_array x,
+    ae_int_t n,
+    real_1d_array y,
+    ae_int_t m,
+    double& bothtails,
+    double& lefttail,
+    double& righttail,
+    const xparams _params = alglib::xdefault);
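For reference, a minimal usage sketch of mannwhitneyutest() as declared above (illustrative only, not part of the upstream diff; the sample values are invented and the statistics.h header name is an assumption):

#include "stdafx.h"
#include <stdio.h>
#include "statistics.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Two independent samples with at least 5 elements each, as required above
    real_1d_array x = "[12.1, 13.4, 11.8, 14.2, 12.9, 13.1]";
    real_1d_array y = "[10.9, 11.2, 12.0, 10.5, 11.7]";
    double bothtails, lefttail, righttail;

    mannwhitneyutest(x, x.length(), y, y.length(), bothtails, lefttail, righttail);

    // Reject the two-sided null hypothesis (equal medians) at the 5% level
    // if the two-tailed p-value is below 0.05.
    printf("%.4f %.4f %.4f\n", bothtails, lefttail, righttail);
    return 0;
}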
    - + +
    + +cmatrixdet
    +cmatrixludet
    +rmatrixdet
    +rmatrixludet
    +spdmatrixcholeskydet
    +spdmatrixdet
    + + + + + + + +
    matdet_d_1 Determinant calculation, real matrix, short form
    matdet_d_2 Determinant calculation, real matrix, full form
    matdet_d_3 Determinant calculation, complex matrix, short form
    matdet_d_4 Determinant calculation, complex matrix, full form
    matdet_d_5 Determinant calculation, complex matrix with zero imaginary part, short form
    +
     
    /************************************************************************* -This subroutine restarts algorithm from new point. -All optimization parameters (including constraints) are left unchanged. +Calculation of the determinant of a general matrix -This function allows to solve multiple optimization problems (which -must have same number of dimensions) without object reallocation penalty. +Input parameters: + A - matrix, array[0..N-1, 0..N-1] + N - (optional) size of matrix A: + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, automatically determined from matrix size + (A must be square matrix) -INPUT PARAMETERS: - State - structure previously allocated with MinBLEICCreate call. - X - new starting point. +Result: determinant of matrix A. -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 2005 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicrestartfrom(minbleicstate state, real_1d_array x); +
    alglib::complex alglib::cmatrixdet(
+    complex_2d_array a,
+    const xparams _params = alglib::xdefault);
+alglib::complex alglib::cmatrixdet(
+    complex_2d_array a,
+    ae_int_t n,
+    const xparams _params = alglib::xdefault);
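For reference, a short illustrative sketch of cmatrixdet() on a 2x2 complex matrix (not part of the upstream diff; the linalg.h header name and the "1+1i" complex literal format are assumptions):

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // det([[1+i, 2], [0, 3]]) = (1+i)*3 - 2*0 = 3+3i
    complex_2d_array a = "[[1+1i,2],[0,3]]";
    alglib::complex d = cmatrixdet(a);
    printf("%.1f%+.1fi\n", double(d.x), double(d.y)); // expected: 3.0+3.0i
    return 0;
}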
    - +

    Examples:   [1]  [2]  [3]  

    +
     
    /************************************************************************* -BLEIC results +Determinant calculation of the matrix given by its LU decomposition. -INPUT PARAMETERS: - State - algorithm state +Input parameters: + A - LU decomposition of the matrix (output of + RMatrixLU subroutine). + Pivots - table of permutations which were made during + the LU decomposition. + Output of RMatrixLU subroutine. + N - (optional) size of matrix A: + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, automatically determined from matrix size + (A must be square matrix) -OUTPUT PARAMETERS: - X - array[0..N-1], solution - Rep - optimization report. You should check Rep.TerminationType - in order to distinguish successful termination from - unsuccessful one: - * -8 internal integrity control detected infinite or - NAN values in function/gradient. Abnormal - termination signalled. - * -7 gradient verification failed. - See MinBLEICSetGradientCheck() for more information. - * -3 inconsistent constraints. Feasible point is - either nonexistent or too hard to find. Try to - restart optimizer with better initial approximation - * 1 relative function improvement is no more than EpsF. - * 2 scaled step is no more than EpsX. - * 4 scaled gradient norm is no more than EpsG. - * 5 MaxIts steps was taken - * 8 terminated by user who called minbleicrequesttermination(). - X contains point which was "current accepted" when - termination request was submitted. - More information about fields of this structure can be - found in the comments on MinBLEICReport datatype. +Result: matrix determinant. -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 2005 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicresults( - minbleicstate state, - real_1d_array& x, - minbleicreport& rep); +
    alglib::complex alglib::cmatrixludet( + complex_2d_array a, + integer_1d_array pivots, + const xparams _params = alglib::xdefault); +alglib::complex alglib::cmatrixludet( + complex_2d_array a, + integer_1d_array pivots, + ae_int_t n, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +
     
    /************************************************************************* -BLEIC results +Calculation of the determinant of a general matrix -Buffered implementation of MinBLEICResults() which uses pre-allocated buffer -to store X[]. If buffer size is too small, it resizes buffer. It is -intended to be used in the inner cycles of performance critical algorithms -where array reallocation penalty is too large to be ignored. +Input parameters: + A - matrix, array[0..N-1, 0..N-1] + N - (optional) size of matrix A: + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, automatically determined from matrix size + (A must be square matrix) + +Result: determinant of matrix A. -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 2005 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicresultsbuf( - minbleicstate state, - real_1d_array& x, - minbleicreport& rep); +
    double alglib::rmatrixdet( + real_2d_array a, + const xparams _params = alglib::xdefault); +double alglib::rmatrixdet( + real_2d_array a, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -This function sets boundary constraints for BLEIC optimizer. - -Boundary constraints are inactive by default (after initial creation). -They are preserved after algorithm restart with MinBLEICRestartFrom(). - -INPUT PARAMETERS: - State - structure stores algorithm state - BndL - lower bounds, array[N]. - If some (all) variables are unbounded, you may specify - very small number or -INF. - BndU - upper bounds, array[N]. - If some (all) variables are unbounded, you may specify - very large number or +INF. +Determinant calculation of the matrix given by its LU decomposition. -NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th -variable will be "frozen" at X[i]=BndL[i]=BndU[i]. +Input parameters: + A - LU decomposition of the matrix (output of + RMatrixLU subroutine). + Pivots - table of permutations which were made during + the LU decomposition. + Output of RMatrixLU subroutine. + N - (optional) size of matrix A: + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, automatically determined from matrix size + (A must be square matrix) -NOTE 2: this solver has following useful properties: -* bound constraints are always satisfied exactly -* function is evaluated only INSIDE area specified by bound constraints, - even when numerical differentiation is used (algorithm adjusts nodes - according to boundary constraints) +Result: matrix determinant. -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 2005 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicsetbc( - minbleicstate state, - real_1d_array bndl, - real_1d_array bndu); +
    double alglib::rmatrixludet( + real_2d_array a, + integer_1d_array pivots, + const xparams _params = alglib::xdefault); +double alglib::rmatrixludet( + real_2d_array a, + integer_1d_array pivots, + ae_int_t n, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  
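A minimal sketch of rmatrixludet in the style of the matdet examples above; it assumes rmatrixlu(a, m, n, pivots) from the trfac part of this manual for the factorization step, so treat it as an illustration rather than a shipped ALGLIB example.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // Factorize A = [[1,2],[2,1]] in place (assumed helper: rmatrixlu(a, m, n, pivots)),
    // then take the determinant from the LU factors; det(A) = 1*1 - 2*2 = -3.
    real_2d_array a = "[[1,2],[2,1]]";
    integer_1d_array pivots;
    rmatrixlu(a, 2, 2, pivots);
    double d = rmatrixludet(a, pivots);
    printf("%.3f\n", d); // EXPECTED: -3
    return 0;
}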

    - +
     
    /************************************************************************* -This function sets stopping conditions for the optimizer. +Determinant calculation of the matrix given by the Cholesky decomposition. -INPUT PARAMETERS: - State - structure which stores algorithm state - EpsG - >=0 - The subroutine finishes its work if the condition - |v|<EpsG is satisfied, where: - * |.| means Euclidian norm - * v - scaled gradient vector, v[i]=g[i]*s[i] - * g - gradient - * s - scaling coefficients set by MinBLEICSetScale() - EpsF - >=0 - The subroutine finishes its work if on k+1-th iteration - the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} - is satisfied. - EpsX - >=0 - The subroutine finishes its work if on k+1-th iteration - the condition |v|<=EpsX is fulfilled, where: - * |.| means Euclidian norm - * v - scaled step vector, v[i]=dx[i]/s[i] - * dx - step vector, dx=X(k+1)-X(k) - * s - scaling coefficients set by MinBLEICSetScale() - MaxIts - maximum number of iterations. If MaxIts=0, the number of - iterations is unlimited. +Input parameters: + A - Cholesky decomposition, + output of SMatrixCholesky subroutine. + N - (optional) size of matrix A: + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, automatically determined from matrix size + (A must be square matrix) -Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead -to automatic stopping criterion selection. +As the determinant is equal to the product of squares of diagonal elements, +it's not necessary to specify which triangle - lower or upper - the matrix +is stored in. -NOTE: when SetCond() called with non-zero MaxIts, BLEIC solver may perform - slightly more than MaxIts iterations. I.e., MaxIts sets non-strict - limit on iterations count. +Result: + matrix determinant. -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 2005-2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicsetcond( - minbleicstate state, - double epsg, - double epsf, - double epsx, - ae_int_t maxits); +
    double alglib::spdmatrixcholeskydet( + real_2d_array a, + const xparams _params = alglib::xdefault); +double alglib::spdmatrixcholeskydet( + real_2d_array a, + ae_int_t n, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  
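A minimal sketch of spdmatrixcholeskydet, assuming spdmatrixcholesky(a, n, isupper) from the trfac part of this manual for the factorization step; an illustration under that assumption, not an official example.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // A = [[5,4],[4,5]] is SPD with det(A) = 25-16 = 9. Factor it with
    // Cholesky (assumed helper: spdmatrixcholesky(a, n, isupper)), then
    // compute the determinant from the factor's diagonal.
    real_2d_array a = "[[5,4],[4,5]]";
    if( !spdmatrixcholesky(a, 2, true) )
        return 1; // matrix is not positive definite
    double d = spdmatrixcholeskydet(a);
    printf("%.3f\n", d); // EXPECTED: 9
    return 0;
}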

    - +
     
    /************************************************************************* -This subroutine turns on verification of the user-supplied analytic -gradient: -* user calls this subroutine before optimization begins -* MinBLEICOptimize() is called -* prior to actual optimization, for each component of parameters being - optimized X[i] algorithm performs following steps: - * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], - where X[i] is i-th component of the initial point and S[i] is a scale - of i-th parameter - * if needed, steps are bounded with respect to constraints on X[] - * F(X) is evaluated at these trial points - * we perform one more evaluation in the middle point of the interval - * we build cubic model using function values and derivatives at trial - points and we compare its prediction with actual value in the middle - point - * in case difference between prediction and actual value is higher than - some predetermined threshold, algorithm stops with completion code -7; - Rep.VarIdx is set to index of the parameter with incorrect derivative. -* after verification is over, algorithm proceeds to the actual optimization. - -NOTE 1: verification needs N (parameters count) gradient evaluations. It - is very costly and you should use it only for low dimensional - problems, when you want to be sure that you've correctly - calculated analytic derivatives. You should not use it in the - production code (unless you want to check derivatives provided by - some third party). - -NOTE 2: you should carefully choose TestStep. Value which is too large - (so large that function behaviour is significantly non-cubic) will - lead to false alarms. You may use different step for different - parameters by means of setting scale with MinBLEICSetScale(). +Determinant calculation of the symmetric positive definite matrix. -NOTE 3: this function may lead to false positives. In case it reports that - I-th derivative was calculated incorrectly, you may decrease test - step and try one more time - maybe your function changes too - sharply and your step is too large for such rapidly chanding - function. +Input parameters: + A - matrix. Array with elements [0..N-1, 0..N-1]. + N - (optional) size of matrix A: + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, automatically determined from matrix size + (A must be square matrix) + IsUpper - (optional) storage type: + * if True, symmetric matrix A is given by its upper + triangle, and the lower triangle isn't used/changed by + function + * if False, symmetric matrix A is given by its lower + triangle, and the upper triangle isn't used/changed by + function + * if not given, both lower and upper triangles must be + filled. -INPUT PARAMETERS: - State - structure used to store algorithm state - TestStep - verification step: - * TestStep=0 turns verification off - * TestStep>0 activates verification +Result: + determinant of matrix A. + If matrix A is not positive definite, exception is thrown. -- ALGLIB -- - Copyright 15.06.2012 by Bochkanov Sergey + Copyright 2005-2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicsetgradientcheck( - minbleicstate state, - double teststep); +
    double alglib::spdmatrixdet( + real_2d_array a, + const xparams _params = alglib::xdefault); +double alglib::spdmatrixdet( + real_2d_array a, + ae_int_t n, + bool isupper, + const xparams _params = alglib::xdefault);
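The short form of spdmatrixdet requires both triangles to be filled; a minimal sketch in the style of the matdet examples above:

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // determinant of a symmetric positive definite matrix, short form:
    // both triangles are filled, so N and IsUpper can be omitted
    real_2d_array a = "[[2,1],[1,2]]";
    double d = spdmatrixdet(a);
    printf("%.3f\n", d); // EXPECTED: 3
    return 0;
}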
    - +
    -
    /************************************************************************* -This function sets linear constraints for BLEIC optimizer. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "linalg.h" -Linear constraints are inactive by default (after initial creation). -They are preserved after algorithm restart with MinBLEICRestartFrom(). +using namespace alglib; -INPUT PARAMETERS: - State - structure previously allocated with MinBLEICCreate call. - C - linear constraints, array[K,N+1]. - Each row of C represents one constraint, either equality - or inequality (see below): - * first N elements correspond to coefficients, - * last element corresponds to the right part. - All elements of C (including right part) must be finite. - CT - type of constraints, array[K]: - * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] - * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] - * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] - K - number of equality/inequality constraints, K>=0: - * if given, only leading K elements of C/CT are used - * if not given, automatically determined from sizes of C/CT -NOTE 1: linear (non-bound) constraints are satisfied only approximately: -* there always exists some minor violation (about Epsilon in magnitude) - due to rounding errors -* numerical differentiation, if used, may lead to function evaluations - outside of the feasible area, because algorithm does NOT change - numerical differentiation formula according to linear constraints. -If you want constraints to be satisfied exactly, try to reformulate your -problem in such manner that all constraints will become boundary ones -(this kind of constraints is always satisfied exactly, both in the final -solution and in all intermediate points). +int main(int argc, char **argv) +{ + real_2d_array b = "[[1,2],[2,1]]"; + double a; + a = rmatrixdet(b); + printf("%.3f\n", double(a)); // EXPECTED: -3 + return 0; +} - -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::minbleicsetlc( - minbleicstate state, - real_2d_array c, - integer_1d_array ct); -void alglib::minbleicsetlc( - minbleicstate state, - real_2d_array c, - integer_1d_array ct, - ae_int_t k); -
    -

    Examples:   [1]  

    - +
    -
    /************************************************************************* -Modification of the preconditioner: preconditioning is turned off. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "linalg.h" -INPUT PARAMETERS: - State - structure which stores algorithm state +using namespace alglib; - -- ALGLIB -- - Copyright 13.10.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::minbleicsetprecdefault(minbleicstate state); -
    - +int main(int argc, char **argv) +{ + real_2d_array b = "[[5,4],[4,5]]"; + double a; + a = rmatrixdet(b, 2); + printf("%.3f\n", double(a)); // EXPECTED: 9 + return 0; +} + + +
    -
    /************************************************************************* -Modification of the preconditioner: diagonal of approximate Hessian is -used. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "linalg.h" -INPUT PARAMETERS: - State - structure which stores algorithm state - D - diagonal of the approximate Hessian, array[0..N-1], - (if larger, only leading N elements are used). +using namespace alglib; -NOTE 1: D[i] should be positive. Exception will be thrown otherwise. -NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. +int main(int argc, char **argv) +{ + complex_2d_array b = "[[1+1i,2],[2,1-1i]]"; + alglib::complex a; + a = cmatrixdet(b); + printf("%s\n", a.tostring(3).c_str()); // EXPECTED: -2 + return 0; +} - -- ALGLIB -- - Copyright 13.10.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::minbleicsetprecdiag(minbleicstate state, real_1d_array d); -
    - +
    -
    /************************************************************************* -Modification of the preconditioner: scale-based diagonal preconditioning. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "linalg.h" -This preconditioning mode can be useful when you don't have approximate -diagonal of Hessian, but you know that your variables are badly scaled -(for example, one variable is in [1,10], and another in [1000,100000]), -and most part of the ill-conditioning comes from different scales of vars. +using namespace alglib; -In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), -can greatly improve convergence. -IMPRTANT: you should set scale of your variables with MinBLEICSetScale() -call (before or after MinBLEICSetPrecScale() call). Without knowledge of -the scale of your variables scale-based preconditioner will be just unit -matrix. +int main(int argc, char **argv) +{ + alglib::complex a; + complex_2d_array b = "[[5i,4],[4i,5]]"; + a = cmatrixdet(b, 2); + printf("%s\n", a.tostring(3).c_str()); // EXPECTED: 9i + return 0; +} + + +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "linalg.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    alglib::complex a;
    +    complex_2d_array b = "[[9,1],[2,1]]";
    +    a = cmatrixdet(b);
    +    printf("%s\n", a.tostring(3).c_str()); // EXPECTED: 7
    +    return 0;
    +}
    +
    +
    +
    + + +
    +
    /************************************************************************* +Generation of random NxN complex matrix with given condition number C and +norm2(A)=1 INPUT PARAMETERS: - State - structure which stores algorithm state + N - matrix size + C - condition number (in 2-norm) - -- ALGLIB -- - Copyright 13.10.2010 by Bochkanov Sergey +OUTPUT PARAMETERS: + A - random matrix with norm2(A)=1 and cond(A)=C + + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicsetprecscale(minbleicstate state); +
    void alglib::cmatrixrndcond( + ae_int_t n, + double c, + complex_2d_array& a, + const xparams _params = alglib::xdefault);
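A minimal usage sketch for cmatrixrndcond; the shape check only demonstrates the call, the guarantees (norm2(A)=1, cond(A)=C) are not verified here, and the rows()/cols() accessors of complex_2d_array are assumed.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // generate a random 3x3 complex matrix with norm2(A)=1 and cond(A)=100
    complex_2d_array a;
    cmatrixrndcond(3, 100.0, a);
    printf("%ldx%ld\n", (long)a.rows(), (long)a.cols()); // EXPECTED: 3x3 (assumed accessors)
    return 0;
}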
    - +
     
    /************************************************************************* -This function sets scaling coefficients for BLEIC optimizer. - -ALGLIB optimizers use scaling matrices to test stopping conditions (step -size and gradient are scaled before comparison with tolerances). Scale of -the I-th variable is a translation invariant measure of: -a) "how large" the variable is -b) how large the step should be to make significant changes in the function +Generation of a random Haar distributed orthogonal complex matrix -Scaling is also used by finite difference variant of the optimizer - step -along I-th axis is equal to DiffStep*S[I]. +INPUT PARAMETERS: + N - matrix size, N>=1 -In most optimizers (and in the BLEIC too) scaling is NOT a form of -preconditioning. It just affects stopping conditions. You should set -preconditioner by separate call to one of the MinBLEICSetPrec...() -functions. +OUTPUT PARAMETERS: + A - orthogonal NxN matrix, array[0..N-1,0..N-1] -There is a special preconditioning mode, however, which uses scaling -coefficients to form diagonal preconditioning matrix. You can turn this -mode on, if you want. But you should understand that scaling is not the -same thing as preconditioning - these are two different, although related -forms of tuning solver. +NOTE: this function uses algorithm described in Stewart, G. W. (1980), + "The Efficient Generation of Random Orthogonal Matrices with an + Application to Condition Estimators". -INPUT PARAMETERS: - State - structure stores algorithm state - S - array[N], non-zero scaling coefficients - S[i] may be negative, sign doesn't matter. + Speaking short, to generate an (N+1)x(N+1) orthogonal matrix, it: + * takes an NxN one + * takes uniformly distributed unit vector of dimension N+1. + * constructs a Householder reflection from the vector, then applies + it to the smaller matrix (embedded in the larger size with a 1 at + the bottom right corner). - -- ALGLIB -- - Copyright 14.01.2011 by Bochkanov Sergey + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicsetscale(minbleicstate state, real_1d_array s); +
    void alglib::cmatrixrndorthogonal( + ae_int_t n, + complex_2d_array& a, + const xparams _params = alglib::xdefault);
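A minimal sketch that draws a random unitary matrix and checks one consequence of orthogonality, a unit-norm column; it assumes alglib::complex exposes its real and imaginary parts as .x and .y.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // draw a random 4x4 Haar-distributed unitary matrix and check that
    // its first column has unit Euclidean norm
    ae_int_t n = 4;
    complex_2d_array a;
    cmatrixrndorthogonal(n, a);
    double nrm = 0;
    for(int i=0; i<n; i++)
        nrm += a[i][0].x*a[i][0].x + a[i][0].y*a[i][0].y; // assumed .x/.y fields
    printf("%.3f\n", nrm); // EXPECTED: 1.000
    return 0;
}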
    - +
     
    /************************************************************************* -This function sets maximum step length +Multiplication of MxN complex matrix by MxM random Haar distributed +complex orthogonal matrix -IMPORTANT: this feature is hard to combine with preconditioning. You can't -set upper limit on step length, when you solve optimization problem with -linear (non-boundary) constraints AND preconditioner turned on. +INPUT PARAMETERS: + A - matrix, array[0..M-1, 0..N-1] + M, N- matrix size -When non-boundary constraints are present, you have to either a) use -preconditioner, or b) use upper limit on step length. YOU CAN'T USE BOTH! -In this case algorithm will terminate with appropriate error code. +OUTPUT PARAMETERS: + A - Q*A, where Q is random MxM orthogonal matrix + + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::cmatrixrndorthogonalfromtheleft( + complex_2d_array& a, + ae_int_t m, + ae_int_t n, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Multiplication of MxN complex matrix by NxN random Haar distributed +complex orthogonal matrix INPUT PARAMETERS: - State - structure which stores algorithm state - StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't - want to limit step length. + A - matrix, array[0..M-1, 0..N-1] + M, N- matrix size -Use this subroutine when you optimize target function which contains exp() -or other fast growing functions, and optimization algorithm makes too -large steps which lead to overflow. This function allows us to reject -steps that are too large (and therefore expose us to the possible -overflow) without actually calculating function value at the x+stp*d. +OUTPUT PARAMETERS: + A - A*Q, where Q is random NxN orthogonal matrix - -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicsetstpmax(minbleicstate state, double stpmax); +
    void alglib::cmatrixrndorthogonalfromtheright( + complex_2d_array& a, + ae_int_t m, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function turns on/off reporting. +Generation of random NxN Hermitian matrix with given condition number and +norm2(A)=1 INPUT PARAMETERS: - State - structure which stores algorithm state - NeedXRep- whether iteration reports are needed or not + N - matrix size + C - condition number (in 2-norm) -If NeedXRep is True, algorithm will call rep() callback function if it is -provided to MinBLEICOptimize(). +OUTPUT PARAMETERS: + A - random matrix with norm2(A)=1 and cond(A)=C - -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicsetxrep(minbleicstate state, bool needxrep); +
    void alglib::hmatrixrndcond( + ae_int_t n, + double c, + complex_2d_array& a, + const xparams _params = alglib::xdefault);
    - +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +
    /************************************************************************* +Hermitian multiplication of NxN matrix by random Haar distributed +complex orthogonal matrix -using namespace alglib; -void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) -{ - // - // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4 - // and its derivatives df/d0 and df/dx1 - // - func = 100*pow(x[0]+3,4) + pow(x[1]-3,4); - grad[0] = 400*pow(x[0]+3,3); - grad[1] = 4*pow(x[1]-3,3); -} +INPUT PARAMETERS: + A - matrix, array[0..N-1, 0..N-1] + N - matrix size -int main(int argc, char **argv) -{ - // - // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4 - // subject to bound constraints -1<=x<=+1, -1<=y<=+1, using BLEIC optimizer. - // - real_1d_array x = "[0,0]"; - real_1d_array bndl = "[-1,-1]"; - real_1d_array bndu = "[+1,+1]"; - minbleicstate state; - minbleicreport rep; +OUTPUT PARAMETERS: + A - Q^H*A*Q, where Q is random NxN orthogonal matrix - // - // These variables define stopping conditions for the optimizer. - // - // We use very simple condition - |g|<=epsg - // - double epsg = 0.000001; - double epsf = 0; - double epsx = 0; - ae_int_t maxits = 0; + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::hmatrixrndmultiply( + complex_2d_array& a, + ae_int_t n, + const xparams _params = alglib::xdefault); - // - // Now we are ready to actually optimize something: - // * first we create optimizer - // * we add boundary constraints - // * we tune stopping conditions - // * and, finally, optimize and obtain results... - // - minbleiccreate(x, state); - minbleicsetbc(state, bndl, bndu); - minbleicsetcond(state, epsg, epsf, epsx, maxits); - alglib::minbleicoptimize(state, function1_grad); - minbleicresults(state, x, rep); +
    + +
    +
    /************************************************************************* +Generation of random NxN Hermitian positive definite matrix with given +condition number and norm2(A)=1 - // - // ...and evaluate these results - // - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4 - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-1,1] - return 0; -} +INPUT PARAMETERS: + N - matrix size + C - condition number (in 2-norm) +OUTPUT PARAMETERS: + A - random HPD matrix with norm2(A)=1 and cond(A)=C -
    + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::hpdmatrixrndcond( + ae_int_t n, + double c, + complex_2d_array& a, + const xparams _params = alglib::xdefault); + +
    +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +
    /************************************************************************* +Generation of random NxN matrix with given condition number and norm2(A)=1 -using namespace alglib; -void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) -{ - // - // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4 - // and its derivatives df/d0 and df/dx1 - // - func = 100*pow(x[0]+3,4) + pow(x[1]-3,4); - grad[0] = 400*pow(x[0]+3,3); - grad[1] = 4*pow(x[1]-3,3); -} +INPUT PARAMETERS: + N - matrix size + C - condition number (in 2-norm) -int main(int argc, char **argv) -{ - // - // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4 - // subject to inequality constraints: - // * x>=2 (posed as general linear constraint), - // * x+y>=6 - // using BLEIC optimizer. - // - real_1d_array x = "[5,5]"; - real_2d_array c = "[[1,0,2],[1,1,6]]"; - integer_1d_array ct = "[1,1]"; - minbleicstate state; - minbleicreport rep; +OUTPUT PARAMETERS: + A - random matrix with norm2(A)=1 and cond(A)=C - // - // These variables define stopping conditions for the optimizer. - // - // We use very simple condition - |g|<=epsg - // - double epsg = 0.000001; - double epsf = 0; - double epsx = 0; - ae_int_t maxits = 0; + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::rmatrixrndcond( + ae_int_t n, + double c, + real_2d_array& a, + const xparams _params = alglib::xdefault); - // - // Now we are ready to actually optimize something: - // * first we create optimizer - // * we add linear constraints - // * we tune stopping conditions - // * and, finally, optimize and obtain results... - // - minbleiccreate(x, state); - minbleicsetlc(state, c, ct); - minbleicsetcond(state, epsg, epsf, epsx, maxits); - alglib::minbleicoptimize(state, function1_grad); - minbleicresults(state, x, rep); +
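The real-valued generator follows the same pattern; a minimal sketch (the shape check only demonstrates the call, and the rows()/cols() accessors are assumed):

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // random 3x3 real matrix with norm2(A)=1 and cond(A)=1000, handy for
    // stress-testing linear solvers on ill-conditioned input
    real_2d_array a;
    rmatrixrndcond(3, 1.0E3, a);
    printf("%ldx%ld\n", (long)a.rows(), (long)a.cols()); // EXPECTED: 3x3 (assumed accessors)
    return 0;
}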
    + +
    +
    /************************************************************************* +Generation of a random uniformly distributed (Haar) orthogonal matrix - // - // ...and evaluate these results - // - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4 - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [2,4] - return 0; -} +INPUT PARAMETERS: + N - matrix size, N>=1 +OUTPUT PARAMETERS: + A - orthogonal NxN matrix, array[0..N-1,0..N-1] -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +NOTE: this function uses algorithm  described  in  Stewart, G. W.  (1980),
    +      "The Efficient Generation of  Random  Orthogonal  Matrices  with  an
    +      Application to Condition Estimators".
     
    -using namespace alglib;
    -void s1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
    -{
    -    //
    -    // this callback calculates f(x) = (1+x)^(-0.2) + (1-x)^(-0.3) + 1000*x and its gradient.
    -    //
    -    // function is trimmed when we calculate it near the singular points or outside of the [-1,+1].
    -    // Note that we do NOT calculate gradient in this case.
    -    //
    -    if( (x[0]<=-0.999999999999) || (x[0]>=+0.999999999999) )
    -    {
    -        func = 1.0E+300;
    -        return;
    -    }
    -    func = pow(1+x[0],-0.2) + pow(1-x[0],-0.3) + 1000*x[0];
    -    grad[0] = -0.2*pow(1+x[0],-1.2) +0.3*pow(1-x[0],-1.3) + 1000;
    -}
+      In short, to generate an (N+1)x(N+1) orthogonal matrix, it:
+      * takes an NxN one
+      * takes a uniformly distributed unit vector of dimension N+1.

    +      * constructs a Householder reflection from the vector, then applies
    +        it to the smaller matrix (embedded in the larger size with a 1 at
    +        the bottom right corner).
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates minimization of f(x) = (1+x)^(-0.2) + (1-x)^(-0.3) + 1000*x.
    -    //
    -    // This function is undefined outside of (-1,+1) and has singularities at x=-1 and x=+1.
    -    // Special technique called "function trimming" allows us to solve this optimization problem 
    -    // - without using boundary constraints!
    -    //
    -    // See http://www.alglib.net/optimization/tipsandtricks.php#ftrimming for more information
    -    // on this subject.
    -    //
    -    real_1d_array x = "[0]";
    -    double epsg = 1.0e-6;
    -    double epsf = 0;
    -    double epsx = 0;
    -    ae_int_t maxits = 0;
    -    minbleicstate state;
    -    minbleicreport rep;
    +  -- ALGLIB routine --
    +     04.12.2009
    +     Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::rmatrixrndorthogonal( + ae_int_t n, + real_2d_array& a, + const xparams _params = alglib::xdefault); - minbleiccreate(x, state); - minbleicsetcond(state, epsg, epsf, epsx, maxits); - alglib::minbleicoptimize(state, s1_grad); - minbleicresults(state, x, rep); +
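A minimal sketch that draws a random orthogonal matrix and numerically checks orthonormality of its first two columns (element access via q[i][j] is assumed):

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // draw a random 4x4 Haar-distributed orthogonal matrix and verify that
    // column 0 has unit norm and is orthogonal to column 1
    ae_int_t n = 4;
    real_2d_array q;
    rmatrixrndorthogonal(n, q);
    double d00 = 0, d01 = 0;
    for(int i=0; i<n; i++)
    {
        d00 += q[i][0]*q[i][0]; // should be ~1
        d01 += q[i][0]*q[i][1]; // should be ~0
    }
    printf("%.3f %.3f\n", d00, fabs(d01)); // EXPECTED: 1.000 0.000
    return 0;
}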
    + +
    +
    /************************************************************************* +Multiplication of MxN matrix by MxM random Haar distributed orthogonal matrix - printf("%s\n", x.tostring(5).c_str()); // EXPECTED: [-0.99917305] - return 0; -} +INPUT PARAMETERS: + A - matrix, array[0..M-1, 0..N-1] + M, N- matrix size + +OUTPUT PARAMETERS: + A - Q*A, where Q is random MxM orthogonal matrix + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::rmatrixrndorthogonalfromtheleft( + real_2d_array& a, + ae_int_t m, + ae_int_t n, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +
    /************************************************************************* +Multiplication of MxN matrix by NxN random Haar distributed orthogonal matrix -using namespace alglib; -void function1_func(const real_1d_array &x, double &func, void *ptr) -{ - // - // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4 - // - func = 100*pow(x[0]+3,4) + pow(x[1]-3,4); -} +INPUT PARAMETERS: + A - matrix, array[0..M-1, 0..N-1] + M, N- matrix size -int main(int argc, char **argv) -{ - // - // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4 - // subject to bound constraints -1<=x<=+1, -1<=y<=+1, using BLEIC optimizer. - // - real_1d_array x = "[0,0]"; - real_1d_array bndl = "[-1,-1]"; - real_1d_array bndu = "[+1,+1]"; - minbleicstate state; - minbleicreport rep; +OUTPUT PARAMETERS: + A - A*Q, where Q is random NxN orthogonal matrix - // - // These variables define stopping conditions for the optimizer. - // - // We use very simple condition - |g|<=epsg - // - double epsg = 0.000001; - double epsf = 0; - double epsx = 0; - ae_int_t maxits = 0; + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::rmatrixrndorthogonalfromtheright( + real_2d_array& a, + ae_int_t m, + ae_int_t n, + const xparams _params = alglib::xdefault); - // - // This variable contains differentiation step - // - double diffstep = 1.0e-6; +
    + +
    +
    /************************************************************************* +Generation of random NxN symmetric matrix with given condition number and +norm2(A)=1 - // - // Now we are ready to actually optimize something: - // * first we create optimizer - // * we add boundary constraints - // * we tune stopping conditions - // * and, finally, optimize and obtain results... - // - minbleiccreatef(x, diffstep, state); - minbleicsetbc(state, bndl, bndu); - minbleicsetcond(state, epsg, epsf, epsx, maxits); - alglib::minbleicoptimize(state, function1_func); - minbleicresults(state, x, rep); +INPUT PARAMETERS: + N - matrix size + C - condition number (in 2-norm) - // - // ...and evaluate these results - // - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4 - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-1,1] - return 0; -} +OUTPUT PARAMETERS: + A - random matrix with norm2(A)=1 and cond(A)=C + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::smatrixrndcond( + ae_int_t n, + double c, + real_2d_array& a, + const xparams _params = alglib::xdefault); -
    -
    - -mincgreport
    -mincgstate
    - -mincgcreate
    -mincgcreatef
    -mincgoptimize
    -mincgrequesttermination
    -mincgrestartfrom
    -mincgresults
    -mincgresultsbuf
    -mincgsetcgtype
    -mincgsetcond
    -mincgsetgradientcheck
    -mincgsetprecdefault
    -mincgsetprecdiag
    -mincgsetprecscale
    -mincgsetscale
    -mincgsetstpmax
    -mincgsetxrep
    -mincgsuggeststep
    mincg_d_1 Nonlinear optimization by CG
    mincg_d_2 Nonlinear optimization with additional settings and restarts
    mincg_ftrim Nonlinear optimization by CG, function with singularities
    mincg_numdiff Nonlinear optimization by CG with numerical differentiation
    - + +
     
    /************************************************************************* -This structure stores optimization report: -* IterationsCount total number of inner iterations -* NFEV number of gradient evaluations -* TerminationType termination type (see below) +Symmetric multiplication of NxN matrix by random Haar distributed +orthogonal matrix -TERMINATION CODES +INPUT PARAMETERS: + A - matrix, array[0..N-1, 0..N-1] + N - matrix size -TerminationType field contains completion code, which can be: - -8 internal integrity control detected infinite or NAN values in - function/gradient. Abnormal termination signalled. - -7 gradient verification failed. - See MinCGSetGradientCheck() for more information. - 1 relative function improvement is no more than EpsF. - 2 relative step is no more than EpsX. - 4 gradient norm is no more than EpsG - 5 MaxIts steps was taken - 7 stopping conditions are too stringent, - further improvement is impossible, - X contains best point found so far. - 8 terminated by user who called mincgrequesttermination(). X contains - point which was "current accepted" when termination request was - submitted. +OUTPUT PARAMETERS: + A - Q'*A*Q, where Q is random NxN orthogonal matrix -Other fields of this structure are not documented and should not be used! + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey *************************************************************************/ -
    class mincgreport -{ - ae_int_t iterationscount; - ae_int_t nfev; - ae_int_t varidx; - ae_int_t terminationtype; -}; +
    void alglib::smatrixrndmultiply( + real_2d_array& a, + ae_int_t n, + const xparams _params = alglib::xdefault);
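A minimal sketch of smatrixrndmultiply: a random orthogonal similarity transform Q'*A*Q preserves the eigenvalues of a symmetric matrix, and in particular its trace, which is easy to verify (element access via a[i][j] is assumed):

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // apply a random orthogonal similarity transform to a symmetric matrix;
    // the trace (2+3+4 = 9) must stay unchanged
    real_2d_array a = "[[2,1,0],[1,3,1],[0,1,4]]";
    smatrixrndmultiply(a, 3);
    double trace = a[0][0] + a[1][1] + a[2][2];
    printf("%.3f\n", trace); // EXPECTED: 9.000
    return 0;
}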
    - +
     
    /************************************************************************* -This object stores state of the nonlinear CG optimizer. +Generation of random NxN symmetric positive definite matrix with given +condition number and norm2(A)=1 -You should use ALGLIB functions to work with this object. +INPUT PARAMETERS: + N - matrix size + C - condition number (in 2-norm) + +OUTPUT PARAMETERS: + A - random SPD matrix with norm2(A)=1 and cond(A)=C + + -- ALGLIB routine -- + 04.12.2009 + Bochkanov Sergey *************************************************************************/ -
    class mincgstate +
    void alglib::spdmatrixrndcond( + ae_int_t n, + double c, + real_2d_array& a, + const xparams _params = alglib::xdefault); + +
    +
/*************************************************************************
+Matrix inverse report:
+* R1 reciprocal of condition number in 1-norm
+* RInf reciprocal of condition number in inf-norm
+*************************************************************************/
+
class matinvreport
{
+    double r1;
+    double rinf;
};
    - +
     
    /************************************************************************* - NONLINEAR CONJUGATE GRADIENT METHOD - -DESCRIPTION: -The subroutine minimizes function F(x) of N arguments by using one of the -nonlinear conjugate gradient methods. +Inversion of a general matrix. -These CG methods are globally convergent (even on non-convex functions) as -long as grad(f) is Lipschitz continuous in a some neighborhood of the -L = { x : f(x)<=f(x0) }. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. +Input parameters: + A - matrix + N - size of matrix A (optional) : + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, size is automatically determined from + matrix size (A must be square matrix) -REQUIREMENTS: -Algorithm will request following information during its operation: -* function value F and its gradient G (simultaneously) at given point X +Output parameters: + Info - return code, same as in RMatrixLUInverse + Rep - solver report, same as in RMatrixLUInverse + A - inverse of matrix A, same as in RMatrixLUInverse + -- ALGLIB -- + Copyright 2005 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::cmatrixinverse( + complex_2d_array& a, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +void alglib::cmatrixinverse( + complex_2d_array& a, + ae_int_t n, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); -USAGE: -1. User initializes algorithm state with MinCGCreate() call -2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and - other functions -3. User calls MinCGOptimize() function which takes algorithm state and - pointer (delegate, etc.) to callback function which calculates F/G. -4. User calls MinCGResults() to get solution -5. Optionally, user may call MinCGRestartFrom() to solve another problem - with same N but another starting point and/or another function. - MinCGRestartFrom() allows to reuse already initialized structure. +
    +

    Examples:   [1]  
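In addition to the linked example, a minimal sketch of the short form of cmatrixinverse (the 2D-array tostring() used for printing is assumed):

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // invert A = i*I in place; the inverse is -i*I
    complex_2d_array a = "[[1i,0],[0,1i]]";
    ae_int_t info;
    matinvreport rep;
    cmatrixinverse(a, info, rep);
    printf("%d\n", int(info)); // EXPECTED: 1
    printf("%s\n", a.tostring(2).c_str()); // EXPECTED: [[-1i,0],[0,-1i]]
    return 0;
}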

    + +
    +
    /************************************************************************* +Inversion of a matrix given by its LU decomposition. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - N - problem dimension, N>0: - * if given, only leading N elements of X are used - * if not given, automatically determined from size of X - X - starting point, array[0..N-1]. + A - LU decomposition of the matrix + (output of CMatrixLU subroutine). + Pivots - table of permutations + (the output of CMatrixLU subroutine). + N - size of matrix A (optional) : + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, size is automatically determined from + matrix size (A must be square matrix) OUTPUT PARAMETERS: - State - structure which stores algorithm state + Info - return code, same as in RMatrixLUInverse + Rep - solver report, same as in RMatrixLUInverse + A - inverse of matrix A, same as in RMatrixLUInverse - -- ALGLIB -- - Copyright 25.03.2010 by Bochkanov Sergey + -- ALGLIB routine -- + 05.02.2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgcreate(real_1d_array x, mincgstate& state); -void alglib::mincgcreate(ae_int_t n, real_1d_array x, mincgstate& state); +
    void alglib::cmatrixluinverse( + complex_2d_array& a, + integer_1d_array pivots, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +void alglib::cmatrixluinverse( + complex_2d_array& a, + integer_1d_array pivots, + ae_int_t n, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - +
     
    /************************************************************************* -The subroutine is finite difference variant of MinCGCreate(). It uses -finite differences in order to differentiate target function. +Triangular matrix inverse (complex) -Description below contains information which is specific to this function -only. We recommend to read comments on MinCGCreate() in order to get more -information about creation of CG optimizer. +The subroutine inverts the following types of matrices: + * upper triangular + * upper triangular with unit diagonal + * lower triangular + * lower triangular with unit diagonal -INPUT PARAMETERS: - N - problem dimension, N>0: - * if given, only leading N elements of X are used - * if not given, automatically determined from size of X - X - starting point, array[0..N-1]. - DiffStep- differentiation step, >0 +In case of an upper (lower) triangular matrix, the inverse matrix will +also be upper (lower) triangular, and after the end of the algorithm, the +inverse matrix replaces the source matrix. The elements below (above) the +main diagonal are not changed by the algorithm. -OUTPUT PARAMETERS: - State - structure which stores algorithm state +If the matrix has a unit diagonal, the inverse matrix also has a unit +diagonal, and the diagonal elements are not passed to the algorithm. -NOTES: -1. algorithm uses 4-point central formula for differentiation. -2. differentiation step along I-th axis is equal to DiffStep*S[I] where - S[] is scaling vector which can be set by MinCGSetScale() call. -3. we recommend you to use moderate values of differentiation step. Too - large step will result in too large truncation errors, while too small - step will result in too large numerical errors. 1.0E-6 can be good - value to start with. -4. Numerical differentiation is very inefficient - one gradient - calculation needs 4*N function evaluations. This function will work for - any N - either small (1...10), moderate (10...100) or large (100...). - However, performance penalty will be too severe for any N's except for - small ones. - We should also say that code which relies on numerical differentiation - is less robust and precise. L-BFGS needs exact gradient values. - Imprecise gradient may slow down convergence, especially on highly - nonlinear problems. - Thus we recommend to use this function for fast prototyping on small- - dimensional problems only, and to implement analytical gradient as soon - as possible. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +Input parameters: + A - matrix, array[0..N-1, 0..N-1]. + N - size of matrix A (optional) : + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, size is automatically determined from + matrix size (A must be square matrix) + IsUpper - True, if the matrix is upper triangular. 
+ IsUnit - diagonal type (optional): + * if True, matrix has unit diagonal (a[i,i] are NOT used) + * if False, matrix diagonal is arbitrary + * if not given, False is assumed + +Output parameters: + Info - same as for RMatrixLUInverse + Rep - same as for RMatrixLUInverse + A - same as for RMatrixLUInverse. -- ALGLIB -- - Copyright 16.05.2011 by Bochkanov Sergey + Copyright 05.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgcreatef( - real_1d_array x, - double diffstep, - mincgstate& state); -void alglib::mincgcreatef( +
    void alglib::cmatrixtrinverse( + complex_2d_array& a, + bool isupper, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +void alglib::cmatrixtrinverse( + complex_2d_array& a, ae_int_t n, - real_1d_array x, - double diffstep, - mincgstate& state); + bool isupper, + bool isunit, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  
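A minimal sketch of the short form of cmatrixtrinverse on an upper triangular matrix; only the upper triangle is read and overwritten, the strictly lower part stays untouched (the 2D-array tostring() used for printing is assumed):

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // invert the upper triangular matrix [[2,1],[0,4]];
    // its inverse is [[0.5,-0.125],[0,0.25]]
    complex_2d_array a = "[[2,1],[0,4]]";
    ae_int_t info;
    matinvreport rep;
    cmatrixtrinverse(a, true, info, rep);
    printf("%d\n", int(info)); // EXPECTED: 1
    printf("%s\n", a.tostring(3).c_str()); // EXPECTED: [[0.5,-0.125],[0,0.25]]
    return 0;
}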

    - +
     
    /************************************************************************* -This family of functions is used to launcn iterations of nonlinear optimizer - -These functions accept following parameters: - state - algorithm state - func - callback which calculates function (or merit function) - value func at given point x - grad - callback which calculates function (or merit function) - value func and gradient grad at given point x - rep - optional callback which is called after each iteration - can be NULL - ptr - optional pointer which is passed to func/grad/hess/jac/rep - can be NULL - -NOTES: - -1. This function has two different implementations: one which uses exact - (analytical) user-supplied gradient, and one which uses function value - only and numerically differentiates function in order to obtain - gradient. - - Depending on the specific function used to create optimizer object - (either MinCGCreate() for analytical gradient or MinCGCreateF() for - numerical differentiation) you should choose appropriate variant of - MinCGOptimize() - one which accepts function AND gradient or one which - accepts function ONLY. - - Be careful to choose variant of MinCGOptimize() which corresponds to - your optimization scheme! Table below lists different combinations of - callback (function/gradient) passed to MinCGOptimize() and specific - function used to create optimizer. +Inversion of a Hermitian positive definite matrix which is given +by Cholesky decomposition. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - | USER PASSED TO MinCGOptimize() - CREATED WITH | function only | function and gradient - ------------------------------------------------------------ - MinCGCreateF() | work FAIL - MinCGCreate() | FAIL work +Input parameters: + A - Cholesky decomposition of the matrix to be inverted: + A=U'*U or A = L*L'. + Output of HPDMatrixCholesky subroutine. + N - size of matrix A (optional) : + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, size is automatically determined from + matrix size (A must be square matrix) + IsUpper - storage type (optional): + * if True, symmetric matrix A is given by its upper + triangle, and the lower triangle isn't used/changed by + function + * if False, symmetric matrix A is given by its lower + triangle, and the upper triangle isn't used/changed by + function + * if not given, lower half is used. - Here "FAIL" denotes inappropriate combinations of optimizer creation - function and MinCGOptimize() version. Attemps to use such combination - (for example, to create optimizer with MinCGCreateF() and to pass - gradient information to MinCGOptimize()) will lead to exception being - thrown. Either you did not pass gradient when it WAS needed or you - passed gradient when it was NOT needed. 
+Output parameters: + Info - return code, same as in RMatrixLUInverse + Rep - solver report, same as in RMatrixLUInverse + A - inverse of matrix A, same as in RMatrixLUInverse - -- ALGLIB -- - Copyright 20.04.2009 by Bochkanov Sergey + -- ALGLIB routine -- + 10.02.2010 + Bochkanov Sergey *************************************************************************/ -
    void mincgoptimize(mincgstate &state, - void (*func)(const real_1d_array &x, double &func, void *ptr), - void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); -void mincgoptimize(mincgstate &state, - void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), - void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); +
    void alglib::hpdmatrixcholeskyinverse( + complex_2d_array& a, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +void alglib::hpdmatrixcholeskyinverse( + complex_2d_array& a, + ae_int_t n, + bool isupper, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +
     
    /************************************************************************* -This subroutine submits request for termination of running optimizer. It -should be called from user-supplied callback when user decides that it is -time to "smoothly" terminate optimization process. As result, optimizer -stops at point which was "current accepted" when termination request was -submitted and returns error code 8 (successful termination). +Inversion of a Hermitian positive definite matrix. -INPUT PARAMETERS: - State - optimizer structure +Given an upper or lower triangle of a Hermitian positive definite matrix, +the algorithm generates matrix A^-1 and saves the upper or lower triangle +depending on the input. -NOTE: after request for termination optimizer may perform several - additional calls to user-supplied callbacks. It does NOT guarantee - to stop immediately - it just guarantees that these additional calls - will be discarded later. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -NOTE: calling this function on optimizer which is NOT running will have no - effect. +Input parameters: + A - matrix to be inverted (upper or lower triangle). + Array with elements [0..N-1,0..N-1]. + N - size of matrix A (optional) : + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, size is automatically determined from + matrix size (A must be square matrix) + IsUpper - storage type (optional): + * if True, symmetric matrix A is given by its upper + triangle, and the lower triangle isn't used/changed by + function + * if False, symmetric matrix A is given by its lower + triangle, and the upper triangle isn't used/changed by + function + * if not given, both lower and upper triangles must be + filled. -NOTE: multiple calls to this function are possible. First call is counted, - subsequent calls are silently ignored. +Output parameters: + Info - return code, same as in RMatrixLUInverse + Rep - solver report, same as in RMatrixLUInverse + A - inverse of matrix A, same as in RMatrixLUInverse - -- ALGLIB -- - Copyright 08.10.2014 by Bochkanov Sergey + -- ALGLIB routine -- + 10.02.2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgrequesttermination(mincgstate state); +
    void alglib::hpdmatrixinverse( + complex_2d_array& a, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +void alglib::hpdmatrixinverse( + complex_2d_array& a, + ae_int_t n, + bool isupper, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This subroutine restarts CG algorithm from new point. All optimization -parameters are left unchanged. +Inversion of a general matrix. -This function allows to solve multiple optimization problems (which -must have same number of dimensions) without object reallocation penalty. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -INPUT PARAMETERS: - State - structure used to store algorithm state. - X - new starting point. +Input parameters: + A - matrix. + N - size of matrix A (optional) : + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, size is automatically determined from + matrix size (A must be square matrix) + +Output parameters: + Info - return code, same as in RMatrixLUInverse + Rep - solver report, same as in RMatrixLUInverse + A - inverse of matrix A, same as in RMatrixLUInverse + +Result: + True, if the matrix is not singular. + False, if the matrix is singular. -- ALGLIB -- - Copyright 30.07.2010 by Bochkanov Sergey + Copyright 2005-2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgrestartfrom(mincgstate state, real_1d_array x); +
    void alglib::rmatrixinverse( + real_2d_array& a, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +void alglib::rmatrixinverse( + real_2d_array& a, + ae_int_t n, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  
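For quick reference, a minimal sketch of the short form of rmatrixinverse (the 2D-array tostring() used for printing is assumed):

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // invert A = [[1,-1],[1,1]] in place; det(A) = 2, so
    // A^-1 = [[0.5,0.5],[-0.5,0.5]]
    real_2d_array a = "[[1,-1],[1,1]]";
    ae_int_t info;
    matinvreport rep;
    rmatrixinverse(a, info, rep);
    printf("%d\n", int(info)); // EXPECTED: 1
    printf("%s\n", a.tostring(2).c_str()); // EXPECTED: [[0.5,0.5],[-0.5,0.5]]
    return 0;
}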

    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -Conjugate gradient results +Inversion of a matrix given by its LU decomposition. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - State - algorithm state + A - LU decomposition of the matrix + (output of RMatrixLU subroutine). + Pivots - table of permutations + (the output of RMatrixLU subroutine). + N - size of matrix A (optional) : + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, size is automatically determined from + matrix size (A must be square matrix) OUTPUT PARAMETERS: - X - array[0..N-1], solution - Rep - optimization report: - * Rep.TerminationType completetion code: - * -8 internal integrity control detected infinite - or NAN values in function/gradient. Abnormal - termination signalled. - * -7 gradient verification failed. - See MinCGSetGradientCheck() for more information. - * 1 relative function improvement is no more than - EpsF. - * 2 relative step is no more than EpsX. - * 4 gradient norm is no more than EpsG - * 5 MaxIts steps was taken - * 7 stopping conditions are too stringent, - further improvement is impossible, - we return best X found so far - * 8 terminated by user - * Rep.IterationsCount contains iterations count - * NFEV countains number of function calculations - - -- ALGLIB -- - Copyright 20.04.2009 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mincgresults( - mincgstate state, - real_1d_array& x, - mincgreport& rep); + Info - return code: + * -3 A is singular, or VERY close to singular. + it is filled by zeros in such cases. + * 1 task is solved (but matrix A may be ill-conditioned, + check R1/RInf parameters for condition numbers). + Rep - solver report, see below for more info + A - inverse of matrix A. + Array whose indexes range within [0..N-1, 0..N-1]. -
    -

    Examples:   [1]  [2]  [3]  [4]  

    - -
    -
    /************************************************************************* -Conjugate gradient results +SOLVER REPORT -Buffered implementation of MinCGResults(), which uses pre-allocated buffer -to store X[]. If buffer size is too small, it resizes buffer. It is -intended to be used in the inner cycles of performance critical algorithms -where array reallocation penalty is too large to be ignored. +Subroutine sets following fields of the Rep structure: +* R1 reciprocal of condition number: 1/cond(A), 1-norm. +* RInf reciprocal of condition number: 1/cond(A), inf-norm. - -- ALGLIB -- - Copyright 20.04.2009 by Bochkanov Sergey + -- ALGLIB routine -- + 05.02.2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgresultsbuf( - mincgstate state, - real_1d_array& x, - mincgreport& rep); +
    void alglib::rmatrixluinverse( + real_2d_array& a, + integer_1d_array pivots, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +void alglib::rmatrixluinverse( + real_2d_array& a, + integer_1d_array pivots, + ae_int_t n, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault);
    - +
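No worked example for rmatrixluinverse() appears in this section, so here is a minimal sketch of typical usage. It assumes the rmatrixlu() factorization routine from the same linalg.h header; the test matrix and its inverse mirror the rmatrixinverse example further below.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // Factorize A in place with rmatrixlu(), then invert it from its
    // LU decomposition; on success A is overwritten by inv(A).
    real_2d_array a = "[[1,-1],[1,1]]";
    integer_1d_array pivots;
    ae_int_t info;
    matinvreport rep;
    rmatrixlu(a, 2, 2, pivots);
    rmatrixluinverse(a, pivots, info, rep);
    printf("%d\n", int(info));             // EXPECTED: 1
    printf("%s\n", a.tostring(4).c_str()); // EXPECTED: [[0.5,0.5],[-0.5,0.5]]
    return 0;
}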
     
    /************************************************************************* -This function sets CG algorithm. +Triangular matrix inverse (real) -INPUT PARAMETERS: - State - structure which stores algorithm state - CGType - algorithm type: - * -1 automatic selection of the best algorithm - * 0 DY (Dai and Yuan) algorithm - * 1 Hybrid DY-HS algorithm +The subroutine inverts the following types of matrices: + * upper triangular + * upper triangular with unit diagonal + * lower triangular + * lower triangular with unit diagonal - -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mincgsetcgtype(mincgstate state, ae_int_t cgtype); +In case of an upper (lower) triangular matrix, the inverse matrix will +also be upper (lower) triangular, and after the end of the algorithm, the +inverse matrix replaces the source matrix. The elements below (above) the +main diagonal are not changed by the algorithm. -
    - -
    -
    /************************************************************************* -This function sets stopping conditions for CG optimization algorithm. +If the matrix has a unit diagonal, the inverse matrix also has a unit +diagonal, and the diagonal elements are not passed to the algorithm. -INPUT PARAMETERS: - State - structure which stores algorithm state - EpsG - >=0 - The subroutine finishes its work if the condition - |v|<EpsG is satisfied, where: - * |.| means Euclidian norm - * v - scaled gradient vector, v[i]=g[i]*s[i] - * g - gradient - * s - scaling coefficients set by MinCGSetScale() - EpsF - >=0 - The subroutine finishes its work if on k+1-th iteration - the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} - is satisfied. - EpsX - >=0 - The subroutine finishes its work if on k+1-th iteration - the condition |v|<=EpsX is fulfilled, where: - * |.| means Euclidian norm - * v - scaled step vector, v[i]=dx[i]/s[i] - * dx - ste pvector, dx=X(k+1)-X(k) - * s - scaling coefficients set by MinCGSetScale() - MaxIts - maximum number of iterations. If MaxIts=0, the number of - iterations is unlimited. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to -automatic stopping criterion selection (small EpsX). +Input parameters: + A - matrix, array[0..N-1, 0..N-1]. + N - size of matrix A (optional) : + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, size is automatically determined from + matrix size (A must be square matrix) + IsUpper - True, if the matrix is upper triangular. + IsUnit - diagonal type (optional): + * if True, matrix has unit diagonal (a[i,i] are NOT used) + * if False, matrix diagonal is arbitrary + * if not given, False is assumed + +Output parameters: + Info - same as for RMatrixLUInverse + Rep - same as for RMatrixLUInverse + A - same as for RMatrixLUInverse. -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 05.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgsetcond( - mincgstate state, - double epsg, - double epsf, - double epsx, - ae_int_t maxits); +
    void alglib::rmatrixtrinverse( + real_2d_array& a, + bool isupper, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +void alglib::rmatrixtrinverse( + real_2d_array& a, + ae_int_t n, + bool isupper, + bool isunit, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - +
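A minimal sketch of rmatrixtrinverse() usage, using the short overload shown above (which assumes a non-unit diagonal): the upper triangular matrix is inverted in place, and elements below the main diagonal are left untouched.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // Invert an upper triangular 2x2 matrix in place (isupper=true).
    real_2d_array a = "[[2,1],[0,2]]";
    ae_int_t info;
    matinvreport rep;
    rmatrixtrinverse(a, true, info, rep);
    printf("%d\n", int(info));             // EXPECTED: 1
    printf("%s\n", a.tostring(4).c_str()); // EXPECTED: [[0.5,-0.25],[0,0.5]]
    return 0;
}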
     
    /************************************************************************* +Inversion of a symmetric positive definite matrix which is given +by Cholesky decomposition. -This subroutine turns on verification of the user-supplied analytic -gradient: -* user calls this subroutine before optimization begins -* MinCGOptimize() is called -* prior to actual optimization, for each component of parameters being - optimized X[i] algorithm performs following steps: - * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], - where X[i] is i-th component of the initial point and S[i] is a scale - of i-th parameter - * F(X) is evaluated at these trial points - * we perform one more evaluation in the middle point of the interval - * we build cubic model using function values and derivatives at trial - points and we compare its prediction with actual value in the middle - point - * in case difference between prediction and actual value is higher than - some predetermined threshold, algorithm stops with completion code -7; - Rep.VarIdx is set to index of the parameter with incorrect derivative. -* after verification is over, algorithm proceeds to the actual optimization. - -NOTE 1: verification needs N (parameters count) gradient evaluations. It - is very costly and you should use it only for low dimensional - problems, when you want to be sure that you've correctly - calculated analytic derivatives. You should not use it in the - production code (unless you want to check derivatives provided by - some third party). - -NOTE 2: you should carefully choose TestStep. Value which is too large - (so large that function behaviour is significantly non-cubic) will - lead to false alarms. You may use different step for different - parameters by means of setting scale with MinCGSetScale(). + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -NOTE 3: this function may lead to false positives. In case it reports that - I-th derivative was calculated incorrectly, you may decrease test - step and try one more time - maybe your function changes too - sharply and your step is too large for such rapidly chanding - function. +Input parameters: + A - Cholesky decomposition of the matrix to be inverted: + A=U'*U or A = L*L'. + Output of SPDMatrixCholesky subroutine. + N - size of matrix A (optional) : + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, size is automatically determined from + matrix size (A must be square matrix) + IsUpper - storage type (optional): + * if True, symmetric matrix A is given by its upper + triangle, and the lower triangle isn't used/changed by + function + * if False, symmetric matrix A is given by its lower + triangle, and the upper triangle isn't used/changed by + function + * if not given, lower half is used. 
-INPUT PARAMETERS: - State - structure used to store algorithm state - TestStep - verification step: - * TestStep=0 turns verification off - * TestStep>0 activates verification +Output parameters: + Info - return code, same as in RMatrixLUInverse + Rep - solver report, same as in RMatrixLUInverse + A - inverse of matrix A, same as in RMatrixLUInverse - -- ALGLIB -- - Copyright 31.05.2012 by Bochkanov Sergey + -- ALGLIB routine -- + 10.02.2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgsetgradientcheck(mincgstate state, double teststep); +
    void alglib::spdmatrixcholeskyinverse( + real_2d_array& a, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +void alglib::spdmatrixcholeskyinverse( + real_2d_array& a, + ae_int_t n, + bool isupper, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault);
    - +
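A minimal sketch combining spdmatrixcholesky() (also declared in linalg.h) with spdmatrixcholeskyinverse(). The short overload used here processes the lower triangle, so only the lower triangle of A holds the inverse afterwards; the matrix and its inverse match the spdmatrixinverse example further below.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "linalg.h"

using namespace alglib;


int main(int argc, char **argv)
{
    // Cholesky-factorize a SPD matrix (lower triangle), then invert it
    // from the factor; the inverse is returned in the lower triangle.
    real_2d_array a = "[[2,1],[1,2]]";
    ae_int_t info;
    matinvreport rep;
    if( !spdmatrixcholesky(a, 2, false) )
    {
        printf("matrix is not positive definite\n");
        return 1;
    }
    spdmatrixcholeskyinverse(a, info, rep);
    printf("%d\n", int(info));         // EXPECTED: 1
    printf("%.4f\n", double(a[0][0])); // EXPECTED: 0.6667
    printf("%.4f\n", double(a[1][0])); // EXPECTED: -0.3333
    printf("%.4f\n", double(a[1][1])); // EXPECTED: 0.6667
    return 0;
}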
     
    /************************************************************************* -Modification of the preconditioner: preconditioning is turned off. +Inversion of a symmetric positive definite matrix. -INPUT PARAMETERS: - State - structure which stores algorithm state +Given an upper or lower triangle of a symmetric positive definite matrix, +the algorithm generates matrix A^-1 and saves the upper or lower triangle +depending on the input. -NOTE: you can change preconditioner "on the fly", during algorithm -iterations. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - -- ALGLIB -- - Copyright 13.10.2010 by Bochkanov Sergey +Input parameters: + A - matrix to be inverted (upper or lower triangle). + Array with elements [0..N-1,0..N-1]. + N - size of matrix A (optional) : + * if given, only principal NxN submatrix is processed and + overwritten. other elements are unchanged. + * if not given, size is automatically determined from + matrix size (A must be square matrix) + IsUpper - storage type (optional): + * if True, symmetric matrix A is given by its upper + triangle, and the lower triangle isn't used/changed by + function + * if False, symmetric matrix A is given by its lower + triangle, and the upper triangle isn't used/changed by + function + * if not given, both lower and upper triangles must be + filled. + +Output parameters: + Info - return code, same as in RMatrixLUInverse + Rep - solver report, same as in RMatrixLUInverse + A - inverse of matrix A, same as in RMatrixLUInverse + + -- ALGLIB routine -- + 10.02.2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgsetprecdefault(mincgstate state); +
    void alglib::spdmatrixinverse( + real_2d_array& a, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault); +void alglib::spdmatrixinverse( + real_2d_array& a, + ae_int_t n, + bool isupper, + ae_int_t& info, + matinvreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
    -
    /************************************************************************* -Modification of the preconditioner: diagonal of approximate Hessian is -used. - -INPUT PARAMETERS: - State - structure which stores algorithm state - D - diagonal of the approximate Hessian, array[0..N-1], - (if larger, only leading N elements are used). +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "linalg.h" -NOTE: you can change preconditioner "on the fly", during algorithm -iterations. +using namespace alglib; -NOTE 2: D[i] should be positive. Exception will be thrown otherwise. -NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. +int main(int argc, char **argv) +{ + complex_2d_array a = "[[1i,-1],[1i,1]]"; + ae_int_t info; + matinvreport rep; + cmatrixinverse(a, info, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + printf("%s\n", a.tostring(4).c_str()); // EXPECTED: [[-0.5i,-0.5i],[-0.5,0.5]] + printf("%.4f\n", double(rep.r1)); // EXPECTED: 0.5 + printf("%.4f\n", double(rep.rinf)); // EXPECTED: 0.5 + return 0; +} - -- ALGLIB -- - Copyright 13.10.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mincgsetprecdiag(mincgstate state, real_1d_array d); -
    - +
    -
    /************************************************************************* -Modification of the preconditioner: scale-based diagonal preconditioning. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "linalg.h" -This preconditioning mode can be useful when you don't have approximate -diagonal of Hessian, but you know that your variables are badly scaled -(for example, one variable is in [1,10], and another in [1000,100000]), -and most part of the ill-conditioning comes from different scales of vars. +using namespace alglib; -In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), -can greatly improve convergence. -IMPRTANT: you should set scale of your variables with MinCGSetScale() call -(before or after MinCGSetPrecScale() call). Without knowledge of the scale -of your variables scale-based preconditioner will be just unit matrix. +int main(int argc, char **argv) +{ + complex_2d_array a = "[[2,1],[1,2]]"; + ae_int_t info; + matinvreport rep; + hpdmatrixinverse(a, info, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + printf("%s\n", a.tostring(4).c_str()); // EXPECTED: [[0.666666,-0.333333],[-0.333333,0.666666]] + return 0; +} -INPUT PARAMETERS: - State - structure which stores algorithm state -NOTE: you can change preconditioner "on the fly", during algorithm -iterations. +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "linalg.h"
     
    -  -- ALGLIB --
    -     Copyright 13.10.2010 by Bochkanov Sergey
    -*************************************************************************/
    -
    void alglib::mincgsetprecscale(mincgstate state); +using namespace alglib; -
    - -
    -
    /************************************************************************* -This function sets scaling coefficients for CG optimizer. -ALGLIB optimizers use scaling matrices to test stopping conditions (step -size and gradient are scaled before comparison with tolerances). Scale of -the I-th variable is a translation invariant measure of: -a) "how large" the variable is -b) how large the step should be to make significant changes in the function +int main(int argc, char **argv) +{ + real_2d_array a = "[[1,-1],[1,1]]"; + ae_int_t info; + matinvreport rep; + rmatrixinverse(a, info, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + printf("%s\n", a.tostring(4).c_str()); // EXPECTED: [[0.5,0.5],[-0.5,0.5]] + printf("%.4f\n", double(rep.r1)); // EXPECTED: 0.5 + printf("%.4f\n", double(rep.rinf)); // EXPECTED: 0.5 + return 0; +} -Scaling is also used by finite difference variant of CG optimizer - step -along I-th axis is equal to DiffStep*S[I]. -In most optimizers (and in the CG too) scaling is NOT a form of -preconditioning. It just affects stopping conditions. You should set -preconditioner by separate call to one of the MinCGSetPrec...() functions. +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "linalg.h"
     
    -There  is  special  preconditioning  mode, however,  which  uses   scaling
    -coefficients to form diagonal preconditioning matrix. You  can  turn  this
    -mode on, if you want.   But  you should understand that scaling is not the
    -same thing as preconditioning - these are two different, although  related
    -forms of tuning solver.
    +using namespace alglib;
     
    -INPUT PARAMETERS:
    -    State   -   structure stores algorithm state
    -    S       -   array[N], non-zero scaling coefficients
    -                S[i] may be negative, sign doesn't matter.
     
    -  -- ALGLIB --
    -     Copyright 14.01.2011 by Bochkanov Sergey
    -*************************************************************************/
    -
    void alglib::mincgsetscale(mincgstate state, real_1d_array s); +int main(int argc, char **argv) +{ + real_2d_array a = "[[2,1],[1,2]]"; + ae_int_t info; + matinvreport rep; + spdmatrixinverse(a, info, rep); + printf("%d\n", int(info)); // EXPECTED: 1 + printf("%s\n", a.tostring(4).c_str()); // EXPECTED: [[0.666666,-0.333333],[-0.333333,0.666666]] + return 0; +} -
    - + + +
    + +mcpdreport
    +mcpdstate
    + +mcpdaddbc
    +mcpdaddec
    +mcpdaddtrack
    +mcpdcreate
    +mcpdcreateentry
    +mcpdcreateentryexit
    +mcpdcreateexit
    +mcpdresults
    +mcpdsetbc
    +mcpdsetec
    +mcpdsetlc
    +mcpdsetpredictionweights
    +mcpdsetprior
    +mcpdsettikhonovregularizer
    +mcpdsolve
    + + + + +
    mcpd_simple1 Simple unconstrained MCPD model (no entry/exit states)
    mcpd_simple2 Simple MCPD model (no entry/exit states) with equality constraints
    +
     
    /************************************************************************* -This function sets maximum step length - -INPUT PARAMETERS: - State - structure which stores algorithm state - StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't - want to limit step length. - -Use this subroutine when you optimize target function which contains exp() -or other fast growing functions, and optimization algorithm makes too -large steps which leads to overflow. This function allows us to reject -steps that are too large (and therefore expose us to the possible -overflow) without actually calculating function value at the x+stp*d. +This structure is a MCPD training report: + InnerIterationsCount - number of inner iterations of the + underlying optimization algorithm + OuterIterationsCount - number of outer iterations of the + underlying optimization algorithm + NFEV - number of merit function evaluations + TerminationType - termination type + (same as for MinBLEIC optimizer, positive + values denote success, negative ones - + failure) -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgsetstpmax(mincgstate state, double stpmax); +
    class mcpdreport +{ + ae_int_t inneriterationscount; + ae_int_t outeriterationscount; + ae_int_t nfev; + ae_int_t terminationtype; +};
    - +
     
    /************************************************************************* -This function turns on/off reporting. - -INPUT PARAMETERS: - State - structure which stores algorithm state - NeedXRep- whether iteration reports are needed or not +This structure is a MCPD (Markov Chains for Population Data) solver. -If NeedXRep is True, algorithm will call rep() callback function if it is -provided to MinCGOptimize(). +You should use ALGLIB functions in order to work with this object. -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgsetxrep(mincgstate state, bool needxrep); +
    class mcpdstate +{ +};
    - +
     
    /************************************************************************* -This function allows to suggest initial step length to the CG algorithm. - -Suggested step length is used as starting point for the line search. It -can be useful when you have badly scaled problem, i.e. when ||grad|| -(which is used as initial estimate for the first step) is many orders of -magnitude different from the desired step. +This function is used to add bound constraints on the elements of the +transition matrix P. -Line search may fail on such problems without good estimate of initial -step length. Imagine, for example, problem with ||grad||=10^50 and desired -step equal to 0.1 Line search function will use 10^50 as initial step, -then it will decrease step length by 2 (up to 20 attempts) and will get -10^44, which is still too large. +MCPD solver has four types of constraints which can be placed on P: +* user-specified equality constraints (optional) +* user-specified bound constraints (optional) +* user-specified general linear constraints (optional) +* basic constraints (always present): + * non-negativity: P[i,j]>=0 + * consistency: every column of P sums to 1.0 -This function allows us to tell than line search should be started from -some moderate step length, like 1.0, so algorithm will be able to detect -desired step length in a several searches. +Final constraints which are passed to the underlying optimizer are +calculated as intersection of all present constraints. For example, you +may specify boundary constraint on P[0,0] and equality one: + 0.1<=P[0,0]<=0.9 + P[0,0]=0.5 +Such combination of constraints will be silently reduced to their +intersection, which is P[0,0]=0.5. -Default behavior (when no step is suggested) is to use preconditioner, if -it is available, to generate initial estimate of step length. +This function can be used to ADD bound constraint for one element of P +without changing constraints for other elements. -This function influences only first iteration of algorithm. It should be -called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call. -Suggested step is ignored if you have preconditioner. +You can also use MCPDSetBC() function which allows to place bound +constraints on arbitrary subset of elements of P. Set of constraints is +specified by BndL/BndU matrices, which may contain arbitrary combination +of finite numbers or infinities (like -INF<x<=0.5 or 0.1<=x<+INF). + +These functions (MCPDSetBC and MCPDAddBC) interact as follows: +* there is internal matrix of bound constraints which is stored in the + MCPD solver +* MCPDSetBC() replaces this matrix by another one (SET) +* MCPDAddBC() modifies one element of this matrix and leaves other ones + unchanged (ADD) +* thus MCPDAddBC() call preserves all modifications done by previous + calls, while MCPDSetBC() completely discards all changes done to the + equality constraints. INPUT PARAMETERS: - State - structure used to store algorithm state. - Stp - initial estimate of the step length. - Can be zero (no estimate). + S - solver + I - row index of element being constrained + J - column index of element being constrained + BndL - lower bound + BndU - upper bound -- ALGLIB -- - Copyright 30.07.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mincgsuggeststep(mincgstate state, double stp); +
    void alglib::mcpdaddbc( + mcpdstate s, + ae_int_t i, + ae_int_t j, + double bndl, + double bndu, + const xparams _params = alglib::xdefault);
    - +
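A minimal sketch of MCPDAddBC() for a 2-state solver (the same setup as the mcpd_simple1 example at the end of this section): each call constrains exactly one element of P and leaves the other elements alone. Tracks would still have to be added before MCPDSolve() is called.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "dataanalysis.h"

using namespace alglib;


int main(int argc, char **argv)
{
    mcpdstate s;
    mcpdcreate(2, s);

    // 0.1 <= P[0,0] <= 0.9; constraints on other elements are unchanged
    mcpdaddbc(s, 0, 0, 0.1, 0.9);

    // P[1,1] >= 0.2 (the upper bound 1.0 is not restrictive for a probability)
    mcpdaddbc(s, 1, 1, 0.2, 1.0);
    return 0;
}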
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +
    /************************************************************************* +This function is used to add equality constraints on the elements of the +transition matrix P. -using namespace alglib; -void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) -{ - // - // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4 - // and its derivatives df/d0 and df/dx1 - // - func = 100*pow(x[0]+3,4) + pow(x[1]-3,4); - grad[0] = 400*pow(x[0]+3,3); - grad[1] = 4*pow(x[1]-3,3); -} +MCPD solver has four types of constraints which can be placed on P: +* user-specified equality constraints (optional) +* user-specified bound constraints (optional) +* user-specified general linear constraints (optional) +* basic constraints (always present): + * non-negativity: P[i,j]>=0 + * consistency: every column of P sums to 1.0 -int main(int argc, char **argv) -{ - // - // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4 - // with nonlinear conjugate gradient method. - // - real_1d_array x = "[0,0]"; - double epsg = 0.0000000001; - double epsf = 0; - double epsx = 0; - ae_int_t maxits = 0; - mincgstate state; - mincgreport rep; +Final constraints which are passed to the underlying optimizer are +calculated as intersection of all present constraints. For example, you +may specify boundary constraint on P[0,0] and equality one: + 0.1<=P[0,0]<=0.9 + P[0,0]=0.5 +Such combination of constraints will be silently reduced to their +intersection, which is P[0,0]=0.5. - mincgcreate(x, state); - mincgsetcond(state, epsg, epsf, epsx, maxits); - alglib::mincgoptimize(state, function1_grad); - mincgresults(state, x, rep); +This function can be used to ADD equality constraint for one element of P +without changing constraints for other elements. - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4 - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3] - return 0; -} +You can also use MCPDSetEC() function which allows you to specify +arbitrary set of equality constraints in one call. +These functions (MCPDSetEC and MCPDAddEC) interact as follows: +* there is internal matrix of equality constraints which is stored in the + MCPD solver +* MCPDSetEC() replaces this matrix by another one (SET) +* MCPDAddEC() modifies one element of this matrix and leaves other ones + unchanged (ADD) +* thus MCPDAddEC() call preserves all modifications done by previous + calls, while MCPDSetEC() completely discards all changes done to the + equality constraints. -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +INPUT PARAMETERS:
    +    S       -   solver
    +    I       -   row index of element being constrained
    +    J       -   column index of element being constrained
    +    C       -   value (constraint for P[I,J]).  Can  be  either  NAN  (no
    +                constraint) or finite value from [0,1].
     
    -using namespace alglib;
    -void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
    -{
    -    //
    -    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
    -    // and its derivatives df/d0 and df/dx1
    -    //
    -    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    -    grad[0] = 400*pow(x[0]+3,3);
    -    grad[1] = 4*pow(x[1]-3,3);
    -}
    +NOTES:
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4
    -    // with nonlinear conjugate gradient method.
    -    //
    -    // Several advanced techniques are demonstrated:
    -    // * upper limit on step size
    -    // * restart from new point
    -    //
    -    real_1d_array x = "[0,0]";
    -    double epsg = 0.0000000001;
    -    double epsf = 0;
    -    double epsx = 0;
    -    double stpmax = 0.1;
    -    ae_int_t maxits = 0;
    -    mincgstate state;
    -    mincgreport rep;
    +1. infinite values of C  will lead to exception being thrown. Values  less
    +than 0.0 or greater than 1.0 will lead to error code being returned  after
    +call to MCPDSolve().
     
    -    // first run
    -    mincgcreate(x, state);
    -    mincgsetcond(state, epsg, epsf, epsx, maxits);
    -    mincgsetstpmax(state, stpmax);
    -    alglib::mincgoptimize(state, function1_grad);
    -    mincgresults(state, x, rep);
    +  -- ALGLIB --
    +     Copyright 23.05.2010 by Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::mcpdaddec( + mcpdstate s, + ae_int_t i, + ae_int_t j, + double c, + const xparams _params = alglib::xdefault); - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3] +
    +

    Examples:   [1]  

    + +
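A minimal sketch of MCPDAddEC(): pin one element of P to an exact value, then clear that constraint again by passing NAN (here assumed to be available as ALGLIB's fp_nan constant from ap.h).

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "dataanalysis.h"

using namespace alglib;


int main(int argc, char **argv)
{
    mcpdstate s;
    mcpdcreate(2, s);

    // require P[0,1] = 0.5 exactly
    mcpdaddec(s, 0, 1, 0.5);

    // NAN means "no constraint", so this clears the constraint again
    mcpdaddec(s, 0, 1, fp_nan);
    return 0;
}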
    +
/************************************************************************* +This function is used to add a track - a sequence of system states at the +different moments of its evolution. + +You may add one or several tracks to the MCPD solver. In case you have +several tracks, they won't overwrite each other. For example, if you pass +two tracks, A1-A2-A3 (system at t=A+1, t=A+2 and t=A+3) and B1-B2-B3, then +solver will try to model transitions from t=A+1 to t=A+2, t=A+2 to t=A+3, +t=B+1 to t=B+2, t=B+2 to t=B+3. But it WON'T mix these two tracks - i.e. it +won't try to model a transition from t=A+3 to t=B+1. + +INPUT PARAMETERS: + S - solver + XY - track, array[K,N]: + * I-th row is a state at t=I + * elements of XY must be non-negative (exception will be + thrown on negative elements) + K - number of points in a track + * if given, only leading K rows of XY are used + * if not given, automatically determined from size of XY + +NOTES: + +1. Track may contain either proportional or population data: + * with proportional data all rows of XY must sum to 1.0, i.e. we have + proportions instead of absolute population values + * with population data rows of XY contain population counts and generally + do not sum to 1.0 (although they still must be non-negative) + + -- ALGLIB -- + Copyright 23.05.2010 by Bochkanov Sergey +*************************************************************************/ -
    void alglib::mcpdaddtrack( + mcpdstate s, + real_2d_array xy, + const xparams _params = alglib::xdefault); +void alglib::mcpdaddtrack( + mcpdstate s, + real_2d_array xy, + ae_int_t k, + const xparams _params = alglib::xdefault); -
    + +

    Examples:   [1]  [2]  

    +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +
    /************************************************************************* +DESCRIPTION: -using namespace alglib; -void s1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) -{ - // - // this callback calculates f(x) = (1+x)^(-0.2) + (1-x)^(-0.3) + 1000*x and its gradient. - // - // function is trimmed when we calculate it near the singular points or outside of the [-1,+1]. - // Note that we do NOT calculate gradient in this case. - // - if( (x[0]<=-0.999999999999) || (x[0]>=+0.999999999999) ) - { - func = 1.0E+300; - return; - } - func = pow(1+x[0],-0.2) + pow(1-x[0],-0.3) + 1000*x[0]; - grad[0] = -0.2*pow(1+x[0],-1.2) +0.3*pow(1-x[0],-1.3) + 1000; -} +This function creates MCPD (Markov Chains for Population Data) solver. -int main(int argc, char **argv) -{ - // - // This example demonstrates minimization of f(x) = (1+x)^(-0.2) + (1-x)^(-0.3) + 1000*x. - // This function has singularities at the boundary of the [-1,+1], but technique called - // "function trimming" allows us to solve this optimization problem. - // - // See http://www.alglib.net/optimization/tipsandtricks.php#ftrimming for more information - // on this subject. - // - real_1d_array x = "[0]"; - double epsg = 1.0e-6; - double epsf = 0; - double epsx = 0; - ae_int_t maxits = 0; - mincgstate state; - mincgreport rep; +This solver can be used to find transition matrix P for N-dimensional +prediction problem where transition from X[i] to X[i+1] is modelled as + X[i+1] = P*X[i] +where X[i] and X[i+1] are N-dimensional population vectors (components of +each X are non-negative), and P is a N*N transition matrix (elements of P +are non-negative, each column sums to 1.0). - mincgcreate(x, state); - mincgsetcond(state, epsg, epsf, epsx, maxits); - alglib::mincgoptimize(state, s1_grad); - mincgresults(state, x, rep); +Such models arise when when: +* there is some population of individuals +* individuals can have different states +* individuals can transit from one state to another +* population size is constant, i.e. there is no new individuals and no one + leaves population +* you want to model transitions of individuals from one state into another - printf("%s\n", x.tostring(5).c_str()); // EXPECTED: [-0.99917305] - return 0; -} +USAGE: +Here we give very brief outline of the MCPD. We strongly recommend you to +read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide +on data analysis which is available at http://www.alglib.net/dataanalysis/ -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +1. User initializes algorithm state with MCPDCreate() call
     
    -using namespace alglib;
    -void function1_func(const real_1d_array &x, double &func, void *ptr)
    -{
    -    //
    -    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
    -    //
    -    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    -}
    +2. User  adds  one  or  more  tracks -  sequences of states which describe
    +   evolution of a system being modelled from different starting conditions
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4
    -    // using numerical differentiation to calculate gradient.
    -    //
    -    real_1d_array x = "[0,0]";
    -    double epsg = 0.0000000001;
    -    double epsf = 0;
    -    double epsx = 0;
    -    double diffstep = 1.0e-6;
    -    ae_int_t maxits = 0;
    -    mincgstate state;
    -    mincgreport rep;
    +3. User may add optional boundary, equality  and/or  linear constraints on
    +   the coefficients of P by calling one of the following functions:
    +   * MCPDSetEC() to set equality constraints
    +   * MCPDSetBC() to set bound constraints
    +   * MCPDSetLC() to set linear constraints
     
    -    mincgcreatef(x, diffstep, state);
    -    mincgsetcond(state, epsg, epsf, epsx, maxits);
    -    alglib::mincgoptimize(state, function1_func);
    -    mincgresults(state, x, rep);
    +4. Optionally,  user  may  set  custom  weights  for prediction errors (by
    +   default, algorithm assigns non-equal, automatically chosen weights  for
    +   errors in the prediction of different components of X). It can be  done
    +   with a call of MCPDSetPredictionWeights() function.
     
    -    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    -    return 0;
    -}
    +5. User calls MCPDSolve() function which takes algorithm  state and
    +   pointer (delegate, etc.) to callback function which calculates F/G.
     
    +6. User calls MCPDResults() to get solution
     
    -
    - - -
    -
    /************************************************************************* +INPUT PARAMETERS: + N - problem dimension, N>=1 + +OUTPUT PARAMETERS: + State - structure stores algorithm state + -- ALGLIB -- + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    class minasareport -{ - ae_int_t iterationscount; - ae_int_t nfev; - ae_int_t terminationtype; - ae_int_t activeconstraints; -}; +
    void alglib::mcpdcreate( + ae_int_t n, + mcpdstate& s, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* +DESCRIPTION: -*************************************************************************/ -
    class minasastate -{ -}; +This function is a specialized version of MCPDCreate() function, and we +recommend you to read comments for this function for general information +about MCPD solver. -
    - -
    -
/************************************************************************* -Obsolete optimization algorithm. -Was replaced by MinBLEIC subpackage. +This function creates MCPD (Markov Chains for Population Data) solver +for "Entry-state" model, i.e. model where transition from X[i] to X[i+1] +is modelled as + X[i+1] = P*X[i] +where + X[i] and X[i+1] are N-dimensional state vectors + P is a N*N transition matrix +and one selected component of X[] is called "entry" state and is treated +in a special way: + system state always transits from "entry" state to some other state + system state can not transit from any state into "entry" state +Such conditions basically mean that row of P which corresponds to "entry" +state is zero. + +Such models arise when: +* there is some population of individuals +* individuals can have different states +* individuals can transit from one state to another +* population size is NOT constant - at every moment of time there is some + (unpredictable) amount of "new" individuals, which can transit into one + of the states at the next turn, but still no one leaves population +* you want to model transitions of individuals from one state into another +* but you do NOT want to predict amount of "new" individuals because it + does not depend on individuals already present (hence system can not + transit INTO entry state - it can only transit FROM it). + +This model is discussed in more details in the ALGLIB User Guide (see +http://www.alglib.net/dataanalysis/ for more data). + +INPUT PARAMETERS: + N - problem dimension, N>=2 + EntryState- index of entry state, in 0..N-1 + +OUTPUT PARAMETERS: + State - structure stores algorithm state -- ALGLIB -- - Copyright 25.03.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minasacreate( - real_1d_array x, - real_1d_array bndl, - real_1d_array bndu, - minasastate& state); -void alglib::minasacreate( +
    void alglib::mcpdcreateentry( ae_int_t n, - real_1d_array x, - real_1d_array bndl, - real_1d_array bndu, - minasastate& state); + ae_int_t entrystate, + mcpdstate& s, + const xparams _params = alglib::xdefault);
    - +
     
/************************************************************************* -This family of functions is used to launcn iterations of nonlinear optimizer +DESCRIPTION: -These functions accept following parameters: - state - algorithm state - grad - callback which calculates function (or merit function) - value func and gradient grad at given point x - rep - optional callback which is called after each iteration - can be NULL - ptr - optional pointer which is passed to func/grad/hess/jac/rep - can be NULL +This function is a specialized version of MCPDCreate() function, and we +recommend you to read comments for this function for general information +about MCPD solver. + +This function creates MCPD (Markov Chains for Population Data) solver +for "Entry-Exit-states" model, i.e. model where transition from X[i] to +X[i+1] is modelled as + X[i+1] = P*X[i] +where + X[i] and X[i+1] are N-dimensional state vectors + P is a N*N transition matrix +one selected component of X[] is called "entry" state and is treated in a +special way: + system state always transits from "entry" state to some other state + system state can not transit from any state into "entry" state +and another component of X[] is called "exit" state and is treated in +a special way too: + system state can transit from any state into "exit" state + system state can not transit from "exit" state into any other state + transition operator discards "exit" state (makes it zero at each turn) +Such conditions basically mean that: + row of P which corresponds to "entry" state is zero + column of P which corresponds to "exit" state is zero +Multiplication by such P may decrease sum of vector components. +Such models arise when: +* there is some population of individuals +* individuals can have different states +* individuals can transit from one state to another +* population size is NOT constant +* at every moment of time there is some (unpredictable) amount of "new" + individuals, which can transit into one of the states at the next turn +* some individuals can move (predictably) into "exit" state and leave + population at the next turn +* you want to model transitions of individuals from one state into another, + including transitions from the "entry" state and into the "exit" state. +* but you do NOT want to predict amount of "new" individuals because it + does not depend on individuals already present (hence system can not + transit INTO entry state - it can only transit FROM it). - -- ALGLIB -- - Copyright 20.03.2009 by Bochkanov Sergey -*************************************************************************/ -
    void minasaoptimize(minasastate &state, - void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), - void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); -
    - -
    -
    /************************************************************************* -Obsolete optimization algorithm. -Was replaced by MinBLEIC subpackage. +This model is discussed in more details in the ALGLIB User Guide (see +http://www.alglib.net/dataanalysis/ for more data). + +INPUT PARAMETERS: + N - problem dimension, N>=2 + EntryState- index of entry state, in 0..N-1 + ExitState- index of exit state, in 0..N-1 + +OUTPUT PARAMETERS: + State - structure stores algorithm state -- ALGLIB -- - Copyright 30.07.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minasarestartfrom( - minasastate state, - real_1d_array x, - real_1d_array bndl, - real_1d_array bndu); +
    void alglib::mcpdcreateentryexit( + ae_int_t n, + ae_int_t entrystate, + ae_int_t exitstate, + mcpdstate& s, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Obsolete optimization algorithm. -Was replaced by MinBLEIC subpackage. +DESCRIPTION: + +This function is a specialized version of MCPDCreate() function, and we +recommend you to read comments for this function for general information +about MCPD solver. + +This function creates MCPD (Markov Chains for Population Data) solver +for "Exit-state" model, i.e. model where transition from X[i] to X[i+1] +is modelled as + X[i+1] = P*X[i] +where + X[i] and X[i+1] are N-dimensional state vectors + P is a N*N transition matrix +and one selected component of X[] is called "exit" state and is treated +in a special way: + system state can transit from any state into "exit" state + system state can not transit from "exit" state into any other state + transition operator discards "exit" state (makes it zero at each turn) +Such conditions basically mean that column of P which corresponds to +"exit" state is zero. Multiplication by such P may decrease sum of vector +components. + +Such models arise when: +* there is some population of individuals +* individuals can have different states +* individuals can transit from one state to another +* population size is NOT constant - individuals can move into "exit" state + and leave population at the next turn, but there are no new individuals +* amount of individuals which leave population can be predicted +* you want to model transitions of individuals from one state into another + (including transitions into the "exit" state) + +This model is discussed in more details in the ALGLIB User Guide (see +http://www.alglib.net/dataanalysis/ for more data). + +INPUT PARAMETERS: + N - problem dimension, N>=2 + ExitState- index of exit state, in 0..N-1 + +OUTPUT PARAMETERS: + State - structure stores algorithm state -- ALGLIB -- - Copyright 20.03.2009 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minasaresults( - minasastate state, - real_1d_array& x, - minasareport& rep); +
    void alglib::mcpdcreateexit( + ae_int_t n, + ae_int_t exitstate, + mcpdstate& s, + const xparams _params = alglib::xdefault);
    - +
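The three specialized constructors above differ only in which state gets the special treatment. A minimal sketch, assuming a 3-state system with state 0 used as the "entry" state and state 2 used as the "exit" state:

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "dataanalysis.h"

using namespace alglib;


int main(int argc, char **argv)
{
    mcpdstate s1, s2, s3;

    mcpdcreateentry(3, 0, s1);        // row 0 of P is forced to zero
    mcpdcreateexit(3, 2, s2);         // column 2 of P is forced to zero
    mcpdcreateentryexit(3, 0, 2, s3); // both restrictions at once
    return 0;
}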
     
/************************************************************************* -Obsolete optimization algorithm. -Was replaced by MinBLEIC subpackage. +MCPD results + +INPUT PARAMETERS: + State - algorithm state + +OUTPUT PARAMETERS: + P - array[N,N], transition matrix + Rep - optimization report. You should check Rep.TerminationType + in order to distinguish successful termination from + unsuccessful one. In short, positive values denote + success, negative ones denote failure. + More information about fields of this structure can be + found in the comments on MCPDReport datatype. + + -- ALGLIB -- - Copyright 20.03.2009 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minasaresultsbuf( - minasastate state, - real_1d_array& x, - minasareport& rep); +
    void alglib::mcpdresults( + mcpdstate s, + real_2d_array& p, + mcpdreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Obsolete optimization algorithm. -Was replaced by MinBLEIC subpackage. +This function is used to add bound constraints on the elements of the +transition matrix P. + +MCPD solver has four types of constraints which can be placed on P: +* user-specified equality constraints (optional) +* user-specified bound constraints (optional) +* user-specified general linear constraints (optional) +* basic constraints (always present): + * non-negativity: P[i,j]>=0 + * consistency: every column of P sums to 1.0 + +Final constraints which are passed to the underlying optimizer are +calculated as intersection of all present constraints. For example, you +may specify boundary constraint on P[0,0] and equality one: + 0.1<=P[0,0]<=0.9 + P[0,0]=0.5 +Such combination of constraints will be silently reduced to their +intersection, which is P[0,0]=0.5. + +This function can be used to place bound constraints on arbitrary +subset of elements of P. Set of constraints is specified by BndL/BndU +matrices, which may contain arbitrary combination of finite numbers or +infinities (like -INF<x<=0.5 or 0.1<=x<+INF). + +You can also use MCPDAddBC() function which allows to ADD bound constraint +for one element of P without changing constraints for other elements. + +These functions (MCPDSetBC and MCPDAddBC) interact as follows: +* there is internal matrix of bound constraints which is stored in the + MCPD solver +* MCPDSetBC() replaces this matrix by another one (SET) +* MCPDAddBC() modifies one element of this matrix and leaves other ones + unchanged (ADD) +* thus MCPDAddBC() call preserves all modifications done by previous + calls, while MCPDSetBC() completely discards all changes done to the + equality constraints. + +INPUT PARAMETERS: + S - solver + BndL - lower bounds constraints, array[N,N]. Elements of BndL can + be finite numbers or -INF. + BndU - upper bounds constraints, array[N,N]. Elements of BndU can + be finite numbers or +INF. -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minasasetalgorithm(minasastate state, ae_int_t algotype); +
    void alglib::mcpdsetbc( + mcpdstate s, + real_2d_array bndl, + real_2d_array bndu, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Obsolete optimization algorithm. -Was replaced by MinBLEIC subpackage. +This function is used to add equality constraints on the elements of the +transition matrix P. + +MCPD solver has four types of constraints which can be placed on P: +* user-specified equality constraints (optional) +* user-specified bound constraints (optional) +* user-specified general linear constraints (optional) +* basic constraints (always present): + * non-negativity: P[i,j]>=0 + * consistency: every column of P sums to 1.0 + +Final constraints which are passed to the underlying optimizer are +calculated as intersection of all present constraints. For example, you +may specify boundary constraint on P[0,0] and equality one: + 0.1<=P[0,0]<=0.9 + P[0,0]=0.5 +Such combination of constraints will be silently reduced to their +intersection, which is P[0,0]=0.5. + +This function can be used to place equality constraints on arbitrary +subset of elements of P. Set of constraints is specified by EC, which may +contain either NAN's or finite numbers from [0,1]. NAN denotes absence of +constraint, finite number denotes equality constraint on specific element +of P. + +You can also use MCPDAddEC() function which allows to ADD equality +constraint for one element of P without changing constraints for other +elements. + +These functions (MCPDSetEC and MCPDAddEC) interact as follows: +* there is internal matrix of equality constraints which is stored in the + MCPD solver +* MCPDSetEC() replaces this matrix by another one (SET) +* MCPDAddEC() modifies one element of this matrix and leaves other ones + unchanged (ADD) +* thus MCPDAddEC() call preserves all modifications done by previous + calls, while MCPDSetEC() completely discards all changes done to the + equality constraints. + +INPUT PARAMETERS: + S - solver + EC - equality constraints, array[N,N]. Elements of EC can be + either NAN's or finite numbers from [0,1]. NAN denotes + absence of constraints, while finite value denotes + equality constraint on the corresponding element of P. + +NOTES: + +1. infinite values of EC will lead to exception being thrown. Values less +than 0.0 or greater than 1.0 will lead to error code being returned after +call to MCPDSolve(). -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minasasetcond( - minasastate state, - double epsg, - double epsf, - double epsx, - ae_int_t maxits); +
    void alglib::mcpdsetec( + mcpdstate s, + real_2d_array ec, + const xparams _params = alglib::xdefault);
    - +
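A minimal sketch showing how the full N*N matrices for MCPDSetBC() and MCPDSetEC() can be filled. It assumes ALGLIB's fp_nan constant from ap.h for the "no equality constraint" entries; for the bounds, [0,1] is used as the non-restrictive default since elements of P are probabilities anyway.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "dataanalysis.h"

using namespace alglib;


int main(int argc, char **argv)
{
    mcpdstate s;
    mcpdcreate(2, s);

    real_2d_array bndl, bndu, ec;
    bndl.setlength(2, 2);
    bndu.setlength(2, 2);
    ec.setlength(2, 2);
    for(int i=0; i<2; i++)
        for(int j=0; j<2; j++)
        {
            bndl[i][j] = 0.0;    // non-restrictive bounds for a probability
            bndu[i][j] = 1.0;
            ec[i][j]   = fp_nan; // NAN = no equality constraint
        }
    bndl[0][0] = 0.1;            // 0.1 <= P[0,0] <= 0.9
    bndu[0][0] = 0.9;
    ec[1][1]   = 0.5;            // P[1,1] = 0.5 exactly

    mcpdsetbc(s, bndl, bndu);    // replaces any previously set bound constraints
    mcpdsetec(s, ec);            // replaces any previously set equality constraints
    return 0;
}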
     
/************************************************************************* -Obsolete optimization algorithm. -Was replaced by MinBLEIC subpackage. +This function is used to set linear equality/inequality constraints on the +elements of the transition matrix P. + +This function can be used to set one or several general linear constraints +on the elements of P. Two types of constraints are supported: +* equality constraints +* inequality constraints (both less-or-equal and greater-or-equal) + +Coefficients of constraints are specified by matrix C (one of the +parameters). One row of C corresponds to one constraint. Because +transition matrix P has N*N elements, we need N*N columns to store all +coefficients (they are stored row by row), and one more column to store +right part - hence C has N*N+1 columns. Constraint kind is stored in the +CT array. + +Thus, I-th linear constraint is + P[0,0]*C[I,0] + P[0,1]*C[I,1] + .. + P[0,N-1]*C[I,N-1] + + + P[1,0]*C[I,N] + P[1,1]*C[I,N+1] + ... + + + P[N-1,N-1]*C[I,N*N-1] ?=? C[I,N*N] +where ?=? can be either "=" (CT[i]=0), "<=" (CT[i]<0) or ">=" (CT[i]>0). + +Your constraint may involve only some subset of P (less than N*N elements). +For example, it can be something like + P[0,0] + P[0,1] = 0.5 +In this case you should still pass a matrix with N*N+1 columns, but all its +elements (except for C[0,0], C[0,1] and C[0,N*N]) will be zero. + +INPUT PARAMETERS: + S - solver + C - array[K,N*N+1] - coefficients of constraints + (see above for complete description) + CT - array[K] - constraint types + (see above for complete description) + K - number of equality/inequality constraints, K>=0: + * if given, only leading K elements of C/CT are used + * if not given, automatically determined from sizes of C/CT -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minasasetstpmax(minasastate state, double stpmax); +
    void alglib::mcpdsetlc( + mcpdstate s, + real_2d_array c, + integer_1d_array ct, + const xparams _params = alglib::xdefault); +void alglib::mcpdsetlc( + mcpdstate s, + real_2d_array c, + integer_1d_array ct, + ae_int_t k, + const xparams _params = alglib::xdefault);
    - +
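A minimal sketch of the C/CT layout described above, for N=2 and the single constraint P[0,0]+P[0,1]=0.5: the row of C holds the coefficients of P[0,0], P[0,1], P[1,0], P[1,1] followed by the right-hand side, and CT[0]=0 marks it as an equality.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "dataanalysis.h"

using namespace alglib;


int main(int argc, char **argv)
{
    mcpdstate s;
    mcpdcreate(2, s);

    // columns: P[0,0], P[0,1], P[1,0], P[1,1], right-hand side
    real_2d_array c = "[[1,1,0,0,0.5]]";
    integer_1d_array ct = "[0]"; // 0 means "=", <0 means "<=", >0 means ">="
    mcpdsetlc(s, c, ct);
    return 0;
}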
     
    /************************************************************************* -Obsolete optimization algorithm. -Was replaced by MinBLEIC subpackage. +This function is used to change prediction weights + +MCPD solver scales prediction errors as follows + Error(P) = ||W*(y-P*x)||^2 +where + x is a system state at time t + y is a system state at time t+1 + P is a transition matrix + W is a diagonal scaling matrix + +By default, weights are chosen in order to minimize relative prediction +error instead of absolute one. For example, if one component of state is +about 0.5 in magnitude and another one is about 0.05, then algorithm will +make corresponding weights equal to 2.0 and 20.0. + +INPUT PARAMETERS: + S - solver + PW - array[N], weights: + * must be non-negative values (exception will be thrown otherwise) + * zero values will be replaced by automatically chosen values -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minasasetxrep(minasastate state, bool needxrep); +
    void alglib::mcpdsetpredictionweights( + mcpdstate s, + real_1d_array pw, + const xparams _params = alglib::xdefault);
    - +
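A minimal sketch of MCPDSetPredictionWeights() for a 2-state solver; a zero entry would keep the automatically chosen weight for that component.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "dataanalysis.h"

using namespace alglib;


int main(int argc, char **argv)
{
    mcpdstate s;
    mcpdcreate(2, s);

    // errors in the second component are weighted 10x more heavily
    real_1d_array pw = "[1.0,10.0]";
    mcpdsetpredictionweights(s, pw);
    return 0;
}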
     
    /************************************************************************* -This is obsolete function which was used by previous version of the BLEIC -optimizer. It does nothing in the current version of BLEIC. +This function allows to set prior values used for regularization of your +problem. + +By default, regularizing term is equal to r*||P-prior_P||^2, where r is a +small non-zero value, P is transition matrix, prior_P is identity matrix, +||X||^2 is a sum of squared elements of X. + +This function allows you to change prior values prior_P. You can also +change r with MCPDSetTikhonovRegularizer() function. + +INPUT PARAMETERS: + S - solver + PP - array[N,N], matrix of prior values: + 1. elements must be real numbers from [0,1] + 2. columns must sum to 1.0. + First property is checked (exception is thrown otherwise), + while second one is not checked/enforced. -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicsetbarrierdecay(minbleicstate state, double mudecay); +
    void alglib::mcpdsetprior( + mcpdstate s, + real_2d_array pp, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This is obsolete function which was used by previous version of the BLEIC -optimizer. It does nothing in the current version of BLEIC. +This function allows to tune amount of Tikhonov regularization being +applied to your problem. + +By default, regularizing term is equal to r*||P-prior_P||^2, where r is a +small non-zero value, P is transition matrix, prior_P is identity matrix, +||X||^2 is a sum of squared elements of X. + +This function allows you to change coefficient r. You can also change +prior values with MCPDSetPrior() function. + +INPUT PARAMETERS: + S - solver + V - regularization coefficient, finite non-negative value. It + is not recommended to specify zero value unless you are + pretty sure that you want it. -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minbleicsetbarrierwidth(minbleicstate state, double mu); +
    void alglib::mcpdsettikhonovregularizer( + mcpdstate s, + double v, + const xparams _params = alglib::xdefault);
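A short sketch combining this setter with MCPDSetPrior(), assuming a 2-state solver s; the prior matrix and the coefficient value below are illustrative assumptions, not recommended defaults:

// Bias the solution toward a user-supplied prior transition matrix instead
// of the default identity, and strengthen the term r*||P-prior_P||^2.
real_2d_array pp = "[[0.9,0.3],[0.1,0.7]]";   // each column sums to 1.0
mcpdsetprior(s, pp);
mcpdsettikhonovregularizer(s, 0.01);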
    - +
     
    /************************************************************************* -Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead. +This function is used to start solution of the MCPD problem. + +After return from this function, you can use MCPDResults() to get solution +and completion code. -- ALGLIB -- - Copyright 13.10.2010 by Bochkanov Sergey + Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgssetcholeskypreconditioner( - minlbfgsstate state, - real_2d_array p, - bool isupper); +
    void alglib::mcpdsolve( + mcpdstate s, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
    -
    /************************************************************************* -Obsolete function, use MinLBFGSSetPrecDefault() instead. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "dataanalysis.h" - -- ALGLIB -- - Copyright 13.10.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::minlbfgssetdefaultpreconditioner(minlbfgsstate state); +using namespace alglib; -
    - + +int main(int argc, char **argv) +{ + // + // The very simple MCPD example + // + // We have a loan portfolio. Our loans can be in one of two states: + // * normal loans ("good" ones) + // * past due loans ("bad" ones) + // + // We assume that: + // * loans can transition from any state to any other state. In + // particular, past due loan can become "good" one at any moment + // with same (fixed) probability. Not realistic, but it is toy example :) + // * portfolio size does not change over time + // + // Thus, we have following model + // state_new = P*state_old + // where + // ( p00 p01 ) + // P = ( ) + // ( p10 p11 ) + // + // We want to model transitions between these two states using MCPD + // approach (Markov Chains for Proportional/Population Data), i.e. + // to restore hidden transition matrix P using actual portfolio data. + // We have: + // * poportional data, i.e. proportion of loans in the normal and past + // due states (not portfolio size measured in some currency, although + // it is possible to work with population data too) + // * two tracks, i.e. two sequences which describe portfolio + // evolution from two different starting states: [1,0] (all loans + // are "good") and [0.8,0.2] (only 80% of portfolio is in the "good" + // state) + // + mcpdstate s; + mcpdreport rep; + real_2d_array p; + real_2d_array track0 = "[[1.00000,0.00000],[0.95000,0.05000],[0.92750,0.07250],[0.91738,0.08263],[0.91282,0.08718]]"; + real_2d_array track1 = "[[0.80000,0.20000],[0.86000,0.14000],[0.88700,0.11300],[0.89915,0.10085]]"; + + mcpdcreate(2, s); + mcpdaddtrack(s, track0); + mcpdaddtrack(s, track1); + mcpdsolve(s); + mcpdresults(s, p, rep); + + // + // Hidden matrix P is equal to + // ( 0.95 0.50 ) + // ( ) + // ( 0.05 0.50 ) + // which means that "good" loans can become "bad" with 5% probability, + // while "bad" loans will return to good state with 50% probability. + // + printf("%s\n", p.tostring(2).c_str()); // EXPECTED: [[0.95,0.50],[0.05,0.50]] + return 0; +} + + + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "dataanalysis.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // Simple MCPD example
    +    //
    +    // We have a loan portfolio. Our loans can be in one of three states:
    +    // * normal loans
    +    // * past due loans
    +    // * charged off loans
    +    //
    +    // We assume that:
    +    // * normal loan can stay normal or become past due (but not charged off)
    +    // * past due loan can stay past due, become normal or charged off
    +    // * charged off loan will stay charged off for the rest of eternity
    +    // * portfolio size does not change over time
    +    // Not realistic, but it is toy example :)
    +    //
    +    // Thus, we have following model
    +    //     state_new = P*state_old
    +    // where
    +    //         ( p00  p01    )
    +    //     P = ( p10  p11    )
    +    //         (      p21  1 )
    +    // i.e. four elements of P are known a priori.
    +    //
+    // Although it is possible (given enough data) to recover these elements
+    // from the tracks alone, we enforce this structure exactly by setting
+    // equality constraints on them.
    +    //
+    // We want to model transitions between these states using MCPD
    +    // approach (Markov Chains for Proportional/Population Data), i.e.
    +    // to restore hidden transition matrix P using actual portfolio data.
    +    // We have:
+    // * proportional data, i.e. proportions of loans in each of the three
+    //   states (not portfolio size measured in some currency, although 
    +    //   it is possible to work with population data too)
    +    // * two tracks, i.e. two sequences which describe portfolio
    +    //   evolution from two different starting states: [1,0,0] (all loans 
    +    //   are "good") and [0.8,0.2,0.0] (only 80% of portfolio is in the "good"
    +    //   state)
    +    //
    +    mcpdstate s;
    +    mcpdreport rep;
    +    real_2d_array p;
    +    real_2d_array track0 = "[[1.000000,0.000000,0.000000],[0.950000,0.050000,0.000000],[0.927500,0.060000,0.012500],[0.911125,0.061375,0.027500],[0.896256,0.060900,0.042844]]";
    +    real_2d_array track1 = "[[0.800000,0.200000,0.000000],[0.860000,0.090000,0.050000],[0.862000,0.065500,0.072500],[0.851650,0.059475,0.088875],[0.838805,0.057451,0.103744]]";
    +
    +    mcpdcreate(3, s);
    +    mcpdaddtrack(s, track0);
    +    mcpdaddtrack(s, track1);
    +    mcpdaddec(s, 0, 2, 0.0);
    +    mcpdaddec(s, 1, 2, 0.0);
    +    mcpdaddec(s, 2, 2, 1.0);
    +    mcpdaddec(s, 2, 0, 0.0);
    +    mcpdsolve(s);
    +    mcpdresults(s, p, rep);
    +
    +    //
    +    // Hidden matrix P is equal to
    +    //         ( 0.95 0.50      )
    +    //         ( 0.05 0.25      )
    +    //         (      0.25 1.00 ) 
    +    // which means that "good" loans can become past due with 5% probability, 
    +    // while past due loans will become charged off with 25% probability or
    +    // return back to normal state with 50% probability.
    +    //
    +    printf("%s\n", p.tostring(2).c_str()); // EXPECTED: [[0.95,0.50,0.00],[0.05,0.25,0.00],[0.00,0.25,1.00]]
    +    return 0;
    +}
    +
    +
    +
    - +
     
    /************************************************************************* This structure stores optimization report: -* IterationsCount total number of inner iterations -* NFEV number of gradient evaluations -* TerminationType termination type (see below) +* iterationscount number of iterations +* nfev number of gradient evaluations +* terminationtype termination type (see below) TERMINATION CODES -TerminationType field contains completion code, which can be: +terminationtype field contains completion code, which can be: -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. - -7 gradient verification failed. - See MinLBFGSSetGradientCheck() for more information. + -3 inconsistent constraints. 1 relative function improvement is no more than EpsF. 2 relative step is no more than EpsX. 4 gradient norm is no more than EpsG @@ -24295,13 +25307,11 @@ 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. - 8 terminated by user who called minlbfgsrequesttermination(). - X contains point which was "current accepted" when termination - request was submitted. - -Other fields of this structure are not documented and should not be used! -*************************************************************************/ -
    class minlbfgsreport + 8 terminated by user who called minbcrequesttermination(). X contains + point which was "current accepted" when termination request was + submitted. +*************************************************************************/ +
    class minbcreport { ae_int_t iterationscount; ae_int_t nfev; @@ -24310,104 +25320,107 @@ };
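A small sketch of how this report is typically inspected after minbcresults(); it assumes a state and x set up as in the MinBC examples below, and treats any positive terminationtype as success, which is an assumption based on the code table above:

minbcreport rep;
minbcresults(state, x, rep);
if( rep.terminationtype>0 )
    printf("converged: %d iterations, %d gradient evaluations\n",
        int(rep.iterationscount), int(rep.nfev));
else
    printf("solver failed, completion code %d\n", int(rep.terminationtype));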
    - +
     
    /************************************************************************* - +This object stores nonlinear optimizer state. +You should use functions provided by MinBC subpackage to work with this +object *************************************************************************/ -
    class minlbfgsstate +
    class minbcstate { };
    - +
     
    /************************************************************************* - LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION + BOX CONSTRAINED OPTIMIZATION + WITH FAST ACTIVATION OF MULTIPLE BOX CONSTRAINTS DESCRIPTION: -The subroutine minimizes function F(x) of N arguments by using a quasi- -Newton method (LBFGS scheme) which is optimized to use a minimum amount -of memory. -The subroutine generates the approximation of an inverse Hessian matrix by -using information about the last M steps of the algorithm (instead of N). -It lessens a required amount of memory from a value of order N^2 to a -value of order 2*N*M. +The subroutine minimizes function F(x) of N arguments subject to box +constraints (with some of box constraints actually being equality ones). +This optimizer uses algorithm similar to that of MinBLEIC (optimizer with +general linear constraints), but presence of box-only constraints allows +us to use faster constraint activation strategies. On large-scale problems, +with multiple constraints active at the solution, this optimizer can be +several times faster than BLEIC. REQUIREMENTS: -Algorithm will request following information during its operation: -* function value F and its gradient G (simultaneously) at given point X - +* user must provide function value and gradient +* starting point X0 must be feasible or + not too far away from the feasible set +* grad(f) must be Lipschitz continuous on a level set: + L = { x : f(x)<=f(x0) } +* function must be defined everywhere on the feasible set F USAGE: -1. User initializes algorithm state with MinLBFGSCreate() call -2. User tunes solver parameters with MinLBFGSSetCond() MinLBFGSSetStpMax() - and other functions -3. User calls MinLBFGSOptimize() function which takes algorithm state and - pointer (delegate, etc.) to callback function which calculates F/G. -4. User calls MinLBFGSResults() to get solution -5. Optionally user may call MinLBFGSRestartFrom() to solve another problem - with same N/M but another starting point and/or another function. - MinLBFGSRestartFrom() allows to reuse already initialized structure. +Constrained optimization if far more complex than the unconstrained one. +Here we give very brief outline of the BC optimizer. We strongly recommend +you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide +on optimization, which is available at http://www.alglib.net/optimization/ -INPUT PARAMETERS: - N - problem dimension. N>0 - M - number of corrections in the BFGS scheme of Hessian - approximation update. Recommended value: 3<=M<=7. The smaller - value causes worse convergence, the bigger will not cause a - considerably better convergence, but will cause a fall in the - performance. M<=N. - X - initial solution approximation, array[0..N-1]. +1. User initializes algorithm state with MinBCCreate() call +2. USer adds box constraints by calling MinBCSetBC() function. -OUTPUT PARAMETERS: - State - structure which stores algorithm state +3. User sets stopping conditions with MinBCSetCond(). +4. User calls MinBCOptimize() function which takes algorithm state and + pointer (delegate, etc.) to callback function which calculates F/G. -NOTES: -1. you may tune stopping conditions with MinLBFGSSetCond() function -2. if target function contains exp() or other fast growing functions, and - optimization algorithm makes too large steps which leads to overflow, - use MinLBFGSSetStpMax() function to bound algorithm's steps. However, - L-BFGS rarely needs such a tuning. +5. 
User calls MinBCResults() to get solution + +6. Optionally user may call MinBCRestartFrom() to solve another problem + with same N but another starting point. + MinBCRestartFrom() allows you to reuse the already initialized structure. + + +INPUT PARAMETERS: + N - problem dimension, N>0: + * if given, only leading N elements of X are used + * if not given, automatically determined from size of X + X - starting point, array[N]: + * it is better to set X to a feasible point + * but X can be infeasible, in which case algorithm will try + to find feasible point first, using X as initial + approximation. +OUTPUT PARAMETERS: + State - structure stores algorithm state -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgscreate( - ae_int_t m, +
    void alglib::minbccreate( real_1d_array x, - minlbfgsstate& state); -void alglib::minlbfgscreate( + minbcstate& state, + const xparams _params = alglib::xdefault); +void alglib::minbccreate( ae_int_t n, - ae_int_t m, real_1d_array x, - minlbfgsstate& state); + minbcstate& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -The subroutine is finite difference variant of MinLBFGSCreate(). It uses +The subroutine is finite difference variant of MinBCCreate(). It uses finite differences in order to differentiate target function. Description below contains information which is specific to this function -only. We recommend to read comments on MinLBFGSCreate() in order to get -more information about creation of LBFGS optimizer. +only. We recommend to read comments on MinBCCreate() in order to get +more information about creation of BC optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used * if not given, automatically determined from size of X - M - number of corrections in the BFGS scheme of Hessian - approximation update. Recommended value: 3<=M<=7. The smaller - value causes worse convergence, the bigger will not cause a - considerably better convergence, but will cause a fall in the - performance. M<=N. X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 @@ -24417,7 +25430,7 @@ NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where - S[] is scaling vector which can be set by MinLBFGSSetScale() call. + S[] is scaling vector which can be set by MinBCSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too large step will result in too large truncation errors, while too small step will result in too large numerical errors. 1.0E-6 can be good @@ -24428,9 +25441,9 @@ However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation - is less robust and precise. LBFGS needs exact gradient values. - Imprecise gradient may slow down convergence, especially on highly - nonlinear problems. + is less robust and precise. CG needs exact gradient values. Imprecise + gradient may slow down convergence, especially on highly nonlinear + problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. @@ -24438,21 +25451,353 @@ -- ALGLIB -- Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgscreatef( - ae_int_t m, +
    void alglib::minbccreatef( real_1d_array x, double diffstep, - minlbfgsstate& state); -void alglib::minlbfgscreatef( + minbcstate& state, + const xparams _params = alglib::xdefault); +void alglib::minbccreatef( ae_int_t n, - ae_int_t m, real_1d_array x, double diffstep, - minlbfgsstate& state); + minbcstate& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This function activates/deactivates verification of the user-supplied +analytic gradient. + +Upon activation of this option OptGuard integrity checker performs +numerical differentiation of your target function at the initial point +(note: future versions may also perform check at the final point) and +compares numerical gradient with analytic one provided by you. + +If difference is too large, an error flag is set and optimization session +continues. After optimization session is over, you can retrieve the report +which stores both gradients and specific components highlighted as +suspicious by the OptGuard. + +The primary OptGuard report can be retrieved with minbcoptguardresults(). + +IMPORTANT: gradient check is a high-overhead option which will cost you + about 3*N additional function evaluations. In many cases it may + cost as much as the rest of the optimization session. + + YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO + CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. + +NOTE: unlike previous incarnation of the gradient checking code, OptGuard + does NOT interrupt optimization even if it discovers bad gradient. + +INPUT PARAMETERS: + State - structure used to store algorithm state + TestStep - verification step used for numerical differentiation: + * TestStep=0 turns verification off + * TestStep>0 activates verification + You should carefully choose TestStep. Value which is + too large (so large that function behavior is non- + cubic at this scale) will lead to false alarms. Too + short step will result in rounding errors dominating + numerical derivative. + + You may use different step for different parameters by + means of setting scale with minbcsetscale(). + +=== EXPLANATION ========================================================== + +In order to verify gradient algorithm performs following steps: + * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], + where X[i] is i-th component of the initial point and S[i] is a scale + of i-th parameter + * F(X) is evaluated at these trial points + * we perform one more evaluation in the middle point of the interval + * we build cubic model using function values and derivatives at trial + points and we compare its prediction with actual value in the middle + point + + -- ALGLIB -- + Copyright 15.06.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbcoptguardgradient( + minbcstate state, + double teststep, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Detailed results of the OptGuard integrity check for nonsmoothness test #0 + +Nonsmoothness (non-C1) test #0 studies function values (not gradient!) +obtained during line searches and monitors behavior of the directional +derivative estimate. + +This test is less powerful than test #1, but it does not depend on the +gradient values and thus it is more robust against artifacts introduced by +numerical differentiation. + +Two reports are returned: +* a "strongest" one, corresponding to line search which had highest + value of the nonsmoothness indicator +* a "longest" one, corresponding to line search which had more function + evaluations, and thus is more detailed + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], f[] - arrays of length CNT which store step lengths and function + values at these points; f[i] is evaluated in x0+stp[i]*d. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +========================================================================== += SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will += see where C1 continuity is violated. +========================================================================== + +INPUT PARAMETERS: + state - algorithm state + +OUTPUT PARAMETERS: + strrep - C1 test #0 "strong" report + lngrep - C1 test #0 "long" report + + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbcoptguardnonc1test0results( + minbcstate state, + optguardnonc1test0report& strrep, + optguardnonc1test0report& lngrep, + const xparams _params = alglib::xdefault); + +
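A brief sketch of retrieving and inspecting these reports after an optimization session run with smoothness monitoring enabled (variable names are illustrative; only the fields documented above are used):

optguardnonc1test0report strrep, lngrep;
minbcoptguardnonc1test0results(state, strrep, lngrep);
if( strrep.positive )
    printf("C1 violation suspected between steps %d and %d of the line search\n",
        int(strrep.stpidxa), int(strrep.stpidxb));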
    + +
    +
    /************************************************************************* +Detailed results of the OptGuard integrity check for nonsmoothness test #1 + +Nonsmoothness (non-C1) test #1 studies individual components of the +gradient computed during line search. + +When precise analytic gradient is provided this test is more powerful than +test #0 which works with function values and ignores user-provided +gradient. However, test #0 becomes more powerful when numerical +differentiation is employed (in such cases test #1 detects higher levels +of numerical noise and becomes too conservative). + +This test also tells specific components of the gradient which violate C1 +continuity, which makes it more informative than #0, which just tells that +continuity is violated. + +Two reports are returned: +* a "strongest" one, corresponding to line search which had highest + value of the nonsmoothness indicator +* a "longest" one, corresponding to line search which had more function + evaluations, and thus is more detailed + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* vidx - is an index of the variable in [0,N) with nonsmooth derivative +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], g[] - arrays of length CNT which store step lengths and gradient + values at these points; g[i] is evaluated in x0+stp[i]*d and contains + vidx-th component of the gradient. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +========================================================================== += SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will += see where C1 continuity is violated. +========================================================================== + +INPUT PARAMETERS: + state - algorithm state + +OUTPUT PARAMETERS: + strrep - C1 test #1 "strong" report + lngrep - C1 test #1 "long" report + + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbcoptguardnonc1test1results( + minbcstate state, + optguardnonc1test1report& strrep, + optguardnonc1test1report& lngrep, + const xparams _params = alglib::xdefault); + +
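The same retrieval pattern applies here; a sketch showing the extra vidx field that identifies the suspicious gradient component:

optguardnonc1test1report strrep1, lngrep1;
minbcoptguardnonc1test1results(state, strrep1, lngrep1);
if( strrep1.positive )
    printf("gradient component %d looks non-C1 between steps %d and %d\n",
        int(strrep1.vidx), int(strrep1.stpidxa), int(strrep1.stpidxb));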
    + +
    +
    /************************************************************************* +Results of OptGuard integrity check, should be called after optimization +session is over. + +=== PRIMARY REPORT ======================================================= + +OptGuard performs several checks which are intended to catch common errors +in the implementation of nonlinear function/gradient: +* incorrect analytic gradient +* discontinuous (non-C0) target functions (constraints) +* nonsmooth (non-C1) target functions (constraints) + +Each of these checks is activated with appropriate function: +* minbcoptguardgradient() for gradient verification +* minbcoptguardsmoothness() for C0/C1 checks + +Following flags are set when these errors are suspected: +* rep.badgradsuspected, and additionally: + * rep.badgradvidx for specific variable (gradient element) suspected + * rep.badgradxbase, a point where gradient is tested + * rep.badgraduser, user-provided gradient (stored as 2D matrix with + single row in order to make report structure compatible with more + complex optimizers like MinNLC or MinLM) + * rep.badgradnum, reference gradient obtained via numerical + differentiation (stored as 2D matrix with single row in order to make + report structure compatible with more complex optimizers like MinNLC + or MinLM) +* rep.nonc0suspected +* rep.nonc1suspected + +=== ADDITIONAL REPORTS/LOGS ============================================== + +Several different tests are performed to catch C0/C1 errors, you can find +out specific test signaled error by looking to: +* rep.nonc0test0positive, for non-C0 test #0 +* rep.nonc1test0positive, for non-C1 test #0 +* rep.nonc1test1positive, for non-C1 test #1 + +Additional information (including line search logs) can be obtained by +means of: +* minbcoptguardnonc1test0results() +* minbcoptguardnonc1test1results() +which return detailed error reports, specific points where discontinuities +were found, and so on. + +========================================================================== + +INPUT PARAMETERS: + state - algorithm state + +OUTPUT PARAMETERS: + rep - generic OptGuard report; more detailed reports can be + retrieved with other functions. + +NOTE: false negatives (nonsmooth problems are not identified as nonsmooth + ones) are possible although unlikely. + + The reason is that you need to make several evaluations around + nonsmoothness in order to accumulate enough information about + function curvature. Say, if you start right from the nonsmooth point, + optimizer simply won't get enough data to understand what is going + wrong before it terminates due to abrupt changes in the derivative. + It is also possible that "unlucky" step will move us to the + termination too quickly. + + Our current approach is to have less than 0.1% false negatives in + our test examples (measured with multiple restarts from random + points), and to have exactly 0% false positives. + + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbcoptguardresults( + minbcstate state, + optguardreport& rep, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function activates/deactivates nonsmoothness monitoring option of +the OptGuard integrity checker. Smoothness monitor silently observes +solution process and tries to detect ill-posed problems, i.e. ones with: +a) discontinuous target function (non-C0) +b) nonsmooth target function (non-C1) + +Smoothness monitoring does NOT interrupt optimization even if it suspects +that your problem is nonsmooth. It just sets corresponding flags in the +OptGuard report which can be retrieved after optimization is over. + +Smoothness monitoring is a moderate overhead option which often adds less +than 1% to the optimizer running time. Thus, you can use it even for large +scale problems. + +NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 + continuity violations. + + First, minor errors are hard to catch - say, a 0.0001 difference in + the model values at two sides of the gap may be due to discontinuity + of the model - or simply because the model has changed. + + Second, C1-violations are especially difficult to detect in a + noninvasive way. The optimizer usually performs very short steps + near the nonsmoothness, and differentiation usually introduces a + lot of numerical noise. It is hard to tell whether some tiny + discontinuity in the slope is due to real nonsmoothness or just due + to numerical noise alone. + + Our top priority was to avoid false positives, so in some rare cases + minor errors may go unnoticed (however, in most cases they can be + spotted with restart from different initial point). + +INPUT PARAMETERS: + state - algorithm state + level - monitoring level: + * 0 - monitoring is disabled + * 1 - noninvasive low-overhead monitoring; function values + and/or gradients are recorded, but OptGuard does not + try to perform additional evaluations in order to + get more information about suspicious locations. + +=== EXPLANATION ========================================================== + +One major source of headache during optimization is the possibility of +the coding errors in the target function/constraints (or their gradients). +Such errors most often manifest themselves as discontinuity or +nonsmoothness of the target/constraints. + +Another frequent situation is when you try to optimize something involving +lots of min() and max() operations, i.e. nonsmooth target. Although not a +coding error, it is nonsmoothness anyway - and smooth optimizers usually +stop right after encountering nonsmoothness, well before reaching solution. + +OptGuard integrity checker helps you to catch such situations: it monitors +function values/gradients being passed to the optimizer and tries to +detect errors. Upon discovering suspicious pair of points it raises appropriate +flag (and allows you to continue optimization). When optimization is done, +you can study OptGuard result. + + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ -
    void alglib::minbcoptguardsmoothness( + minbcstate state, + const xparams _params = alglib::xdefault); +void alglib::minbcoptguardsmoothness( + minbcstate state, + ae_int_t level, + const xparams _params = alglib::xdefault); + +
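The two overloads differ only in whether the monitoring level is given explicitly; a minimal sketch (level semantics as documented above):

minbcoptguardsmoothness(state);      // default monitoring level
minbcoptguardsmoothness(state, 1);   // explicit level: noninvasive monitoring
minbcoptguardsmoothness(state, 0);   // explicitly disable monitoring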
    +
     
    /************************************************************************* This family of functions is used to launch iterations of nonlinear optimizer @@ -24476,44 +25821,46 @@ gradient. Depending on the specific function used to create optimizer object - (either MinLBFGSCreate() for analytical gradient or MinLBFGSCreateF() + (either MinBCCreate() for analytical gradient or MinBCCreateF() for numerical differentiation) you should choose appropriate variant of - MinLBFGSOptimize() - one which accepts function AND gradient or one + MinBCOptimize() - one which accepts function AND gradient or one which accepts function ONLY. - Be careful to choose variant of MinLBFGSOptimize() which corresponds to + Be careful to choose variant of MinBCOptimize() which corresponds to your optimization scheme! Table below lists different combinations of - callback (function/gradient) passed to MinLBFGSOptimize() and specific + callback (function/gradient) passed to MinBCOptimize() and specific function used to create optimizer. - | USER PASSED TO MinLBFGSOptimize() + | USER PASSED TO MinBCOptimize() CREATED WITH | function only | function and gradient ------------------------------------------------------------ - MinLBFGSCreateF() | work FAIL - MinLBFGSCreate() | FAIL work + MinBCCreateF() | works FAILS + MinBCCreate() | FAILS works Here "FAIL" denotes inappropriate combinations of optimizer creation - function and MinLBFGSOptimize() version. Attemps to use such - combination (for example, to create optimizer with MinLBFGSCreateF() and - to pass gradient information to MinCGOptimize()) will lead to exception - being thrown. Either you did not pass gradient when it WAS needed or - you passed gradient when it was NOT needed. + function and MinBCOptimize() version. Attempts to use such + combination (for example, to create optimizer with MinBCCreateF() + and to pass gradient information to MinBCOptimize()) will lead to + exception being thrown. Either you did not pass gradient when it WAS + needed or you passed gradient when it was NOT needed. -- ALGLIB -- - Copyright 20.03.2009 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void minlbfgsoptimize(minlbfgsstate &state, +
    void minbcoptimize(minbcstate &state, void (*func)(const real_1d_array &x, double &func, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); -void minlbfgsoptimize(minlbfgsstate &state, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault); +void minbcoptimize(minbcstate &state, void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); + void *ptr = NULL, + const xparams _xparams = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* This subroutine submits request for termination of running optimizer. It @@ -24539,95 +25886,132 @@ -- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgsrequesttermination(minlbfgsstate state); +
    void alglib::minbcrequesttermination( + minbcstate state, + const xparams _params = alglib::xdefault);
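One common pattern (a hypothetical sketch, not taken from the shipped examples) is to request termination from the rep() callback once some external condition is met; this assumes iteration reports were enabled with minbcsetxrep() and that the address of the state object was passed as the user pointer of minbcoptimize():

void my_rep(const real_1d_array &x, double func, void *ptr)
{
    // 'ptr' is assumed to carry the address of the minbcstate object
    minbcstate *st = (minbcstate*)ptr;
    if( func<1.0e-3 )
        minbcrequesttermination(*st);
}

// ... during setup (sketch):
//     minbcsetxrep(state, true);
//     alglib::minbcoptimize(state, function1_grad, my_rep, &state);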
    - +
     
    /************************************************************************* -This subroutine restarts LBFGS algorithm from new point. All optimization -parameters are left unchanged. +This subroutine restarts algorithm from new point. +All optimization parameters (including constraints) are left unchanged. This function allows to solve multiple optimization problems (which -must have same number of dimensions) without object reallocation penalty. +must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: - State - structure used to store algorithm state + State - structure previously allocated with MinBCCreate call. X - new starting point. -- ALGLIB -- - Copyright 30.07.2010 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgsrestartfrom(minlbfgsstate state, real_1d_array x); +
    void alglib::minbcrestartfrom( + minbcstate state, + real_1d_array x, + const xparams _params = alglib::xdefault);
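A short sketch of the restart pattern, mirroring the restart example that existed for the L-BFGS optimizer; state, x and rep are assumed to come from an earlier minbccreate/minbcoptimize run with the function1_grad callback used in the examples below:

// solve the same box-constrained problem again from a new starting point
x = "[0.5,-0.5]";
minbcrestartfrom(state, x);
alglib::minbcoptimize(state, function1_grad);
minbcresults(state, x, rep);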
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -L-BFGS algorithm results +BC results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution - Rep - optimization report: - * Rep.TerminationType completetion code: - * -8 internal integrity control detected infinite - or NAN values in function/gradient. Abnormal - termination signalled. - * -7 gradient verification failed. - See MinLBFGSSetGradientCheck() for more information. - * -2 rounding errors prevent further improvement. - X contains best point found. - * -1 incorrect parameters were specified - * 1 relative function improvement is no more than - EpsF. - * 2 relative step is no more than EpsX. - * 4 gradient norm is no more than EpsG - * 5 MaxIts steps was taken - * 7 stopping conditions are too stringent, - further improvement is impossible - * 8 terminated by user who called minlbfgsrequesttermination(). - X contains point which was "current accepted" when - termination request was submitted. - * Rep.IterationsCount contains iterations count - * NFEV countains number of function calculations + Rep - optimization report. You should check Rep.TerminationType + in order to distinguish successful termination from + unsuccessful one: + * -8 internal integrity control detected infinite or + NAN values in function/gradient. Abnormal + termination signalled. + * -3 inconsistent constraints. + * 1 relative function improvement is no more than EpsF. + * 2 scaled step is no more than EpsX. + * 4 scaled gradient norm is no more than EpsG. + * 5 MaxIts steps was taken + * 8 terminated by user who called minbcrequesttermination(). + X contains point which was "current accepted" when + termination request was submitted. + More information about fields of this structure can be + found in the comments on MinBCReport datatype. -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgsresults( - minlbfgsstate state, +
    void alglib::minbcresults( + minbcstate state, real_1d_array& x, - minlbfgsreport& rep); + minbcreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -L-BFGS algorithm results +BC results -Buffered implementation of MinLBFGSResults which uses pre-allocated buffer +Buffered implementation of MinBCResults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. -- ALGLIB -- - Copyright 20.08.2010 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgsresultsbuf( - minlbfgsstate state, +
    void alglib::minbcresultsbuf( + minbcstate state, real_1d_array& x, - minlbfgsreport& rep); + minbcreport& rep, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets stopping conditions for L-BFGS optimization algorithm. +This function sets boundary constraints for BC optimizer. + +Boundary constraints are inactive by default (after initial creation). +They are preserved after algorithm restart with MinBCRestartFrom(). + +INPUT PARAMETERS: + State - structure stores algorithm state + BndL - lower bounds, array[N]. + If some (all) variables are unbounded, you may specify + very small number or -INF. + BndU - upper bounds, array[N]. + If some (all) variables are unbounded, you may specify + very large number or +INF. + +NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th +variable will be "frozen" at X[i]=BndL[i]=BndU[i]. + +NOTE 2: this solver has following useful properties: +* bound constraints are always satisfied exactly +* function is evaluated only INSIDE area specified by bound constraints, + even when numerical differentiation is used (algorithm adjusts nodes + according to boundary constraints) + + -- ALGLIB -- + Copyright 28.11.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbcsetbc( + minbcstate state, + real_1d_array bndl, + real_1d_array bndu, + const xparams _params = alglib::xdefault); + +
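A sketch of mixing finite and infinite bounds; the ALGLIB C++ interface exposes fp_posinf/fp_neginf constants for this, but treat the exact spelling as an assumption and check ap.h in your version:

real_1d_array bndl, bndu;
bndl.setlength(2);
bndu.setlength(2);
bndl[0] = 0.0;                 bndu[0] = alglib::fp_posinf;  // x0 >= 0
bndl[1] = alglib::fp_neginf;   bndu[1] = alglib::fp_posinf;  // x1 unbounded
minbcsetbc(state, bndl, bndu);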
    +

    Examples:   [1]  [2]  

    + +
    +
    /************************************************************************* +This function sets stopping conditions for the optimizer. INPUT PARAMETERS: State - structure which stores algorithm state @@ -24637,7 +26021,7 @@ * |.| means Euclidian norm * v - scaled gradient vector, v[i]=g[i]*s[i] * g - gradient - * s - scaling coefficients set by MinLBFGSSetScale() + * s - scaling coefficients set by MinBCSetScale() EpsF - >=0 The subroutine finishes its work if on k+1-th iteration the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} @@ -24647,131 +26031,48 @@ the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidian norm * v - scaled step vector, v[i]=dx[i]/s[i] - * dx - ste pvector, dx=X(k+1)-X(k) - * s - scaling coefficients set by MinLBFGSSetScale() + * dx - step vector, dx=X(k+1)-X(k) + * s - scaling coefficients set by MinBCSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of iterations is unlimited. -Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to -automatic stopping criterion selection (small EpsX). +Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead +to automatic stopping criterion selection. + +NOTE: when SetCond() called with non-zero MaxIts, BC solver may perform + slightly more than MaxIts iterations. I.e., MaxIts sets non-strict + limit on iterations count. -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgssetcond( - minlbfgsstate state, +
    void alglib::minbcsetcond( + minbcstate state, double epsg, double epsf, double epsx, - ae_int_t maxits); - -
    -

    Examples:   [1]  [2]  [3]  

    - -
    -
    /************************************************************************* -This subroutine turns on verification of the user-supplied analytic -gradient: -* user calls this subroutine before optimization begins -* MinLBFGSOptimize() is called -* prior to actual optimization, for each component of parameters being - optimized X[i] algorithm performs following steps: - * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], - where X[i] is i-th component of the initial point and S[i] is a scale - of i-th parameter - * if needed, steps are bounded with respect to constraints on X[] - * F(X) is evaluated at these trial points - * we perform one more evaluation in the middle point of the interval - * we build cubic model using function values and derivatives at trial - points and we compare its prediction with actual value in the middle - point - * in case difference between prediction and actual value is higher than - some predetermined threshold, algorithm stops with completion code -7; - Rep.VarIdx is set to index of the parameter with incorrect derivative. -* after verification is over, algorithm proceeds to the actual optimization. - -NOTE 1: verification needs N (parameters count) gradient evaluations. It - is very costly and you should use it only for low dimensional - problems, when you want to be sure that you've correctly - calculated analytic derivatives. You should not use it in the - production code (unless you want to check derivatives provided by - some third party). - -NOTE 2: you should carefully choose TestStep. Value which is too large - (so large that function behaviour is significantly non-cubic) will - lead to false alarms. You may use different step for different - parameters by means of setting scale with MinLBFGSSetScale(). - -NOTE 3: this function may lead to false positives. In case it reports that - I-th derivative was calculated incorrectly, you may decrease test - step and try one more time - maybe your function changes too - sharply and your step is too large for such rapidly chanding - function. - -INPUT PARAMETERS: - State - structure used to store algorithm state - TestStep - verification step: - * TestStep=0 turns verification off - * TestStep>0 activates verification - - -- ALGLIB -- - Copyright 24.05.2012 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::minlbfgssetgradientcheck( - minlbfgsstate state, - double teststep); - -
    - -
    -
    /************************************************************************* -Modification of the preconditioner: Cholesky factorization of approximate -Hessian is used. - -INPUT PARAMETERS: - State - structure which stores algorithm state - P - triangular preconditioner, Cholesky factorization of - the approximate Hessian. array[0..N-1,0..N-1], - (if larger, only leading N elements are used). - IsUpper - whether upper or lower triangle of P is given - (other triangle is not referenced) - -After call to this function preconditioner is changed to P (P is copied -into the internal buffer). - -NOTE: you can change preconditioner "on the fly", during algorithm -iterations. - -NOTE 2: P should be nonsingular. Exception will be thrown otherwise. - - -- ALGLIB -- - Copyright 13.10.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::minlbfgssetpreccholesky( - minlbfgsstate state, - real_2d_array p, - bool isupper); + ae_int_t maxits, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Modification of the preconditioner: default preconditioner (simple -scaling, same for all elements of X) is used. +Modification of the preconditioner: preconditioning is turned off. INPUT PARAMETERS: State - structure which stores algorithm state -NOTE: you can change preconditioner "on the fly", during algorithm -iterations. - -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgssetprecdefault(minlbfgsstate state); +
    void alglib::minbcsetprecdefault( + minbcstate state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* Modification of the preconditioner: diagonal of approximate Hessian is @@ -24782,20 +26083,20 @@ D - diagonal of the approximate Hessian, array[0..N-1], (if larger, only leading N elements are used). -NOTE: you can change preconditioner "on the fly", during algorithm -iterations. - -NOTE 2: D[i] should be positive. Exception will be thrown otherwise. +NOTE 1: D[i] should be positive. Exception will be thrown otherwise. -NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. +NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgssetprecdiag(minlbfgsstate state, real_1d_array d); +
    void alglib::minbcsetprecdiag( + minbcstate state, + real_1d_array d, + const xparams _params = alglib::xdefault);
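A minimal sketch, assuming a 2-variable state; the diagonal values are purely illustrative estimates of the second derivatives, not derived from any particular target:

// pass the diagonal of an approximate Hessian - NOT its inverse
real_1d_array d = "[1000.0,10.0]";
minbcsetprecdiag(state, d);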
    - +
     
    /************************************************************************* Modification of the preconditioner: scale-based diagonal preconditioning. @@ -24808,8 +26109,8 @@ In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), can greatly improve convergence. -IMPRTANT: you should set scale of your variables with MinLBFGSSetScale() -call (before or after MinLBFGSSetPrecScale() call). Without knowledge of +IMPORTANT: you should set scale of your variables with MinBCSetScale() +call (before or after MinBCSetPrecScale() call). Without knowledge of the scale of your variables scale-based preconditioner will be just unit matrix. @@ -24819,13 +26120,15 @@ -- ALGLIB -- Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgssetprecscale(minlbfgsstate state); +
    void alglib::minbcsetprecscale( + minbcstate state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets scaling coefficients for LBFGS optimizer. +This function sets scaling coefficients for BC optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of @@ -24836,12 +26139,12 @@ Scaling is also used by finite difference variant of the optimizer - step along I-th axis is equal to DiffStep*S[I]. -In most optimizers (and in the LBFGS too) scaling is NOT a form of +In most optimizers (and in the BC too) scaling is NOT a form of preconditioning. It just affects stopping conditions. You should set -preconditioner by separate call to one of the MinLBFGSSetPrec...() +preconditioner by separate call to one of the MinBCSetPrec...() functions. -There is special preconditioning mode, however, which uses scaling +There is a special preconditioning mode, however, which uses scaling coefficients to form diagonal preconditioning matrix. You can turn this mode on, if you want. But you should understand that scaling is not the same thing as preconditioning - these are two different, although related @@ -24855,32 +26158,38 @@ -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgssetscale(minlbfgsstate state, real_1d_array s); +
    void alglib::minbcsetscale( + minbcstate state, + real_1d_array s, + const xparams _params = alglib::xdefault);
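For example, if the first variable is typically of order 1e-3 and the second of order 1e+4, a natural scaling would be (a sketch, assuming a 2-variable state):

real_1d_array s = "[1.0e-3,1.0e+4]";
minbcsetscale(state, s);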
    - +
     
    /************************************************************************* This function sets maximum step length INPUT PARAMETERS: State - structure which stores algorithm state - StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if - you don't want to limit step length. + StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't + want to limit step length. Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too -large steps which leads to overflow. This function allows us to reject +large steps which lead to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgssetstpmax(minlbfgsstate state, double stpmax); +
    void alglib::minbcsetstpmax( + minbcstate state, + double stpmax, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* This function turns on/off reporting. @@ -24890,16 +26199,18 @@ NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is -provided to MinLBFGSOptimize(). - +provided to MinBCOptimize(). -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlbfgssetxrep(minlbfgsstate state, bool needxrep); +
    void alglib::minbcsetxrep( + minbcstate state, + bool needxrep, + const xparams _params = alglib::xdefault);
    - +
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -24922,29 +26233,87 @@
     int main(int argc, char **argv)
     {
         //
    -    // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4
    -    // using LBFGS method.
    +    // This example demonstrates minimization of
    +    //
    +    //     f(x,y) = 100*(x+3)^4+(y-3)^4
    +    //
    +    // subject to box constraints
    +    //
    +    //     -1<=x<=+1, -1<=y<=+1
    +    //
    +    // using MinBC optimizer with:
    +    // * initial point x=[0,0]
    +    // * unit scale being set for all variables (see minbcsetscale for more info)
    +    // * stopping criteria set to "terminate after short enough step"
    +    // * OptGuard integrity check being used to check problem statement
    +    //   for some common errors like nonsmoothness or bad analytic gradient
    +    //
    +    // First, we create optimizer object and tune its properties:
    +    // * set box constraints
    +    // * set variable scales
    +    // * set stopping criteria
         //
         real_1d_array x = "[0,0]";
    -    double epsg = 0.0000000001;
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[-1,-1]";
    +    real_1d_array bndu = "[+1,+1]";
    +    minbcstate state;
    +    double epsg = 0;
         double epsf = 0;
    -    double epsx = 0;
    +    double epsx = 0.000001;
         ae_int_t maxits = 0;
    -    minlbfgsstate state;
    -    minlbfgsreport rep;
    +    minbccreate(x, state);
    +    minbcsetbc(state, bndl, bndu);
    +    minbcsetscale(state, s);
    +    minbcsetcond(state, epsg, epsf, epsx, maxits);
     
    -    minlbfgscreate(1, x, state);
    -    minlbfgssetcond(state, epsg, epsf, epsx, maxits);
    -    alglib::minlbfgsoptimize(state, function1_grad);
    -    minlbfgsresults(state, x, rep);
    +    //
    +    // Then we activate OptGuard integrity checking.
    +    //
    +    // OptGuard monitor helps to catch common coding and problem statement
    +    // issues, like:
    +    // * discontinuity of the target function (C0 continuity violation)
    +    // * nonsmoothness of the target function (C1 continuity violation)
    +    // * erroneous analytic gradient, i.e. one inconsistent with actual
    +    //   change in the target/constraints
    +    //
    +    // OptGuard is essential for early prototyping stages because such
    +    // problems often result in premature termination of the optimizer
    +    // which is really hard to distinguish from the correct termination.
    +    //
    +    // IMPORTANT: GRADIENT VERIFICATION IS PERFORMED BY MEANS OF NUMERICAL
    +    //            DIFFERENTIATION. DO NOT USE IT IN PRODUCTION CODE!!!!!!!
    +    //
    +    //            Other OptGuard checks add moderate overhead, but anyway
    +    //            it is better to turn them off when they are not needed.
    +    //
    +    minbcoptguardsmoothness(state);
    +    minbcoptguardgradient(state, 0.001);
     
    -    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    +    //
    +    // Optimize and evaluate results
    +    //
    +    minbcreport rep;
    +    alglib::minbcoptimize(state, function1_grad);
    +    minbcresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-1,1]
    +
    +    //
    +    // Check that OptGuard did not report errors
    +    //
    +    // NOTE: want to test OptGuard? Try breaking the gradient - say, add
    +    //       1.0 to some of its components.
    +    //
    +    optguardreport ogrep;
    +    minbcoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
         return 0;
     }
     
     
    -
    +
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -24953,530 +26322,662 @@
     #include "optimization.h"
     
     using namespace alglib;
    -void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
    +void function1_func(const real_1d_array &x, double &func, void *ptr)
     {
         //
         // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
    -    // and its derivatives df/d0 and df/dx1
         //
         func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    -    grad[0] = 400*pow(x[0]+3,3);
    -    grad[1] = 4*pow(x[1]-3,3);
     }
     
     int main(int argc, char **argv)
     {
         //
    -    // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4
    -    // using LBFGS method.
    +    // This example demonstrates minimization of
         //
    -    // Several advanced techniques are demonstrated:
    -    // * upper limit on step size
    -    // * restart from new point
    +    //     f(x,y) = 100*(x+3)^4+(y-3)^4
    +    //
    +    // subject to box constraints
    +    //
    +    //    -1<=x<=+1, -1<=y<=+1
    +    //
    +    // using MinBC optimizer with:
    +    // * numerical differentiation being used
    +    // * initial point x=[0,0]
    +    // * unit scale being set for all variables (see minbcsetscale for more info)
    +    // * stopping criteria set to "terminate after short enough step"
    +    // * OptGuard integrity check being used to check problem statement
    +    //   for some common errors like nonsmoothness or bad analytic gradient
         //
         real_1d_array x = "[0,0]";
    -    double epsg = 0.0000000001;
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[-1,-1]";
    +    real_1d_array bndu = "[+1,+1]";
    +    minbcstate state;
    +    double epsg = 0;
         double epsf = 0;
    -    double epsx = 0;
    -    double stpmax = 0.1;
    +    double epsx = 0.000001;
         ae_int_t maxits = 0;
    -    minlbfgsstate state;
    -    minlbfgsreport rep;
    -
    -    // first run
    -    minlbfgscreate(1, x, state);
    -    minlbfgssetcond(state, epsg, epsf, epsx, maxits);
    -    minlbfgssetstpmax(state, stpmax);
    -    alglib::minlbfgsoptimize(state, function1_grad);
    -    minlbfgsresults(state, x, rep);
    -
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    -
    -    // second run - algorithm is restarted
    -    x = "[10,10]";
    -    minlbfgsrestartfrom(state, x);
    -    alglib::minlbfgsoptimize(state, function1_grad);
    -    minlbfgsresults(state, x, rep);
    -
    -    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    -    return 0;
    -}
    -
    +    double diffstep = 1.0e-6;
     
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +    //
    +    // Now we are ready to actually optimize something:
    +    // * first we create optimizer
    +    // * we add boundary constraints
    +    // * we tune stopping conditions
    +    // * and, finally, optimize and obtain results...
    +    //
    +    minbccreatef(x, diffstep, state);
    +    minbcsetbc(state, bndl, bndu);
    +    minbcsetscale(state, s);
    +    minbcsetcond(state, epsg, epsf, epsx, maxits);
     
    -using namespace alglib;
    -void s1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
    -{
         //
    -    // this callback calculates f(x) = (1+x)^(-0.2) + (1-x)^(-0.3) + 1000*x and its gradient.
    +    // Then we activate OptGuard integrity checking.
         //
    -    // function is trimmed when we calculate it near the singular points or outside of the [-1,+1].
    -    // Note that we do NOT calculate gradient in this case.
    +    // Numerical differentiation always produces a "correct" gradient
    +    // (with some truncation error, but unbiased). Thus, we just have
    +    // to check smoothness properties of the target: C0 and C1 continuity.
         //
    -    if( (x[0]<=-0.999999999999) || (x[0]>=+0.999999999999) )
    -    {
    -        func = 1.0E+300;
    -        return;
    -    }
    -    func = pow(1+x[0],-0.2) + pow(1-x[0],-0.3) + 1000*x[0];
    -    grad[0] = -0.2*pow(1+x[0],-1.2) +0.3*pow(1-x[0],-1.3) + 1000;
    -}
    +    // Sometimes a user accidentally tries to solve a nonsmooth problem
    +    // with a smooth optimizer. OptGuard helps to detect such situations
    +    // early, at the prototyping stage.
    +    //
    +    minbcoptguardsmoothness(state);
     
    -int main(int argc, char **argv)
    -{
         //
    -    // This example demonstrates minimization of f(x) = (1+x)^(-0.2) + (1-x)^(-0.3) + 1000*x.
    -    // This function has singularities at the boundary of the [-1,+1], but technique called
    -    // "function trimming" allows us to solve this optimization problem.
    +    // Optimize and evaluate results
         //
    -    // See http://www.alglib.net/optimization/tipsandtricks.php#ftrimming for more information
    -    // on this subject.
    +    minbcreport rep;
    +    alglib::minbcoptimize(state, function1_func);
    +    minbcresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-1,1]
    +
         //
    -    real_1d_array x = "[0]";
    -    double epsg = 1.0e-6;
    -    double epsf = 0;
    -    double epsx = 0;
    -    ae_int_t maxits = 0;
    -    minlbfgsstate state;
    -    minlbfgsreport rep;
    -
    -    minlbfgscreate(1, x, state);
    -    minlbfgssetcond(state, epsg, epsf, epsx, maxits);
    -    alglib::minlbfgsoptimize(state, s1_grad);
    -    minlbfgsresults(state, x, rep);
    -
    -    printf("%s\n", x.tostring(5).c_str()); // EXPECTED: [-0.99917305]
    -    return 0;
    -}
    -
    -
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    -
    -using namespace alglib;
    -void function1_func(const real_1d_array &x, double &func, void *ptr)
    -{
    -    //
    -    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
    -    //
    -    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    -}
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4
    -    // using numerical differentiation to calculate gradient.
    +    // Check that OptGuard did not report errors
         //
    -    real_1d_array x = "[0,0]";
    -    double epsg = 0.0000000001;
    -    double epsf = 0;
    -    double epsx = 0;
    -    double diffstep = 1.0e-6;
    -    ae_int_t maxits = 0;
    -    minlbfgsstate state;
    -    minlbfgsreport rep;
    -
    -    minlbfgscreatef(1, x, diffstep, state);
    -    minlbfgssetcond(state, epsg, epsf, epsx, maxits);
    -    alglib::minlbfgsoptimize(state, function1_func);
    -    minlbfgsresults(state, x, rep);
    -
    -    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    +    // Want to challenge OptGuard? Try making your problem
    +    // nonsmooth by replacing 100*(x+3)^4 with 100*|x+3|, then
    +    // re-run the optimizer.
    +    //
    +    optguardreport ogrep;
    +    minbcoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
         return 0;
     }
     
     
    -
    +
    -minlmreport
    -minlmstate
    +minbleicreport
    +minbleicstate
    -minlmcreatefgh
    -minlmcreatefgj
    -minlmcreatefj
    -minlmcreatev
    -minlmcreatevgj
    -minlmcreatevj
    -minlmoptimize
    -minlmrequesttermination
    -minlmrestartfrom
    -minlmresults
    -minlmresultsbuf
    -minlmsetacctype
    -minlmsetbc
    -minlmsetcond
    -minlmsetgradientcheck
    -minlmsetscale
    -minlmsetstpmax
    -minlmsetxrep
    +minbleiccreate
    +minbleiccreatef
    +minbleicoptguardgradient
    +minbleicoptguardnonc1test0results
    +minbleicoptguardnonc1test1results
    +minbleicoptguardresults
    +minbleicoptguardsmoothness
    +minbleicoptimize
    +minbleicrequesttermination
    +minbleicrestartfrom
    +minbleicresults
    +minbleicresultsbuf
    +minbleicsetbc
    +minbleicsetcond
    +minbleicsetlc
    +minbleicsetprecdefault
    +minbleicsetprecdiag
    +minbleicsetprecscale
    +minbleicsetscale
    +minbleicsetstpmax
    +minbleicsetxrep
    - - - - - + + +
    minlm_d_fgh Nonlinear Hessian-based optimization for general functions
    minlm_d_restarts Efficient restarts of LM optimizer
    minlm_d_v Nonlinear least squares optimization using function vector only
    minlm_d_vb Bound constrained nonlinear least squares optimization
    minlm_d_vj Nonlinear least squares optimization using function vector and Jacobian
    minbleic_d_1 Nonlinear optimization with bound constraints
    minbleic_d_2 Nonlinear optimization with linear inequality constraints
    minbleic_numdiff Nonlinear optimization with bound constraints and numerical differentiation
    - +
     
    /************************************************************************* -Optimization report, filled by MinLMResults() function +This structure stores optimization report: +* IterationsCount number of iterations +* NFEV number of gradient evaluations +* TerminationType termination type (see below) -FIELDS: -* TerminationType, completetion code: - * -7 derivative correctness check failed; - see rep.funcidx, rep.varidx for - more information. - * -3 constraints are inconsistent - * 1 relative function improvement is no more than - EpsF. - * 2 relative step is no more than EpsX. - * 4 gradient is no more than EpsG. - * 5 MaxIts steps was taken - * 7 stopping conditions are too stringent, - further improvement is impossible - * 8 terminated by user who called MinLMRequestTermination(). - X contains point which was "current accepted" when termination - request was submitted. -* IterationsCount, contains iterations count -* NFunc, number of function calculations -* NJac, number of Jacobi matrix calculations -* NGrad, number of gradient calculations -* NHess, number of Hessian calculations -* NCholesky, number of Cholesky decomposition calculations +TERMINATION CODES + +TerminationType field contains completion code, which can be: + -8 internal integrity control detected infinite or NAN values in + function/gradient. Abnormal termination signalled. + -3 inconsistent constraints. Feasible point is + either nonexistent or too hard to find. Try to + restart optimizer with better initial approximation + 1 relative function improvement is no more than EpsF. + 2 relative step is no more than EpsX. + 4 gradient norm is no more than EpsG + 5 MaxIts steps was taken + 7 stopping conditions are too stringent, + further improvement is impossible, + X contains best point found so far. + 8 terminated by user who called minbleicrequesttermination(). X contains + point which was "current accepted" when termination request was + submitted. + +ADDITIONAL FIELDS + +There are additional fields which can be used for debugging: +* DebugEqErr error in the equality constraints (2-norm) +* DebugFS f, calculated at projection of initial point + to the feasible set +* DebugFF f, calculated at the final point +* DebugDX |X_start-X_final| *************************************************************************/ -
    class minlmreport +
    class minbleicreport { ae_int_t iterationscount; - ae_int_t terminationtype; - ae_int_t funcidx; + ae_int_t nfev; ae_int_t varidx; - ae_int_t nfunc; - ae_int_t njac; - ae_int_t ngrad; - ae_int_t nhess; - ae_int_t ncholesky; + ae_int_t terminationtype; + double debugeqerr; + double debugfs; + double debugff; + double debugdx; + ae_int_t debugfeasqpits; + ae_int_t debugfeasgpaits; + ae_int_t inneriterationscount; + ae_int_t outeriterationscount; };
    - +
     
    /************************************************************************* -Levenberg-Marquardt optimizer. - -This structure should be created using one of the MinLMCreate???() -functions. You should not access its fields directly; use ALGLIB functions -to work with it. +This object stores nonlinear optimizer state. +You should use functions provided by MinBLEIC subpackage to work with this +object *************************************************************************/ -
    class minlmstate +
    class minbleicstate { };
    - +
     
    /************************************************************************* - LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION + BOUND CONSTRAINED OPTIMIZATION + WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS DESCRIPTION: -This function is used to find minimum of general form (not "sum-of- --squares") function - F = F(x[0], ..., x[n-1]) -using its gradient and Hessian. Levenberg-Marquardt modification with -L-BFGS pre-optimization and internal pre-conditioned L-BFGS optimization -after each Levenberg-Marquardt step is used. - +The subroutine minimizes function F(x) of N arguments subject to any +combination of: +* bound constraints +* linear inequality constraints +* linear equality constraints REQUIREMENTS: -This algorithm will request following information during its operation: +* user must provide function value and gradient +* starting point X0 must be feasible or + not too far away from the feasible set +* grad(f) must be Lipschitz continuous on a level set: + L = { x : f(x)<=f(x0) } +* function must be defined everywhere on the feasible set F -* function value F at given point X -* F and gradient G (simultaneously) at given point X -* F, G and Hessian H (simultaneously) at given point X +USAGE: -There are several overloaded versions of MinLMOptimize() function which -correspond to different LM-like optimization algorithms provided by this -unit. You should choose version which accepts func(), grad() and hess() -function pointers. First pointer is used to calculate F at given point, -second one calculates F(x) and grad F(x), third one calculates F(x), -grad F(x), hess F(x). +Constrained optimization if far more complex than the unconstrained one. +Here we give very brief outline of the BLEIC optimizer. We strongly recommend +you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide +on optimization, which is available at http://www.alglib.net/optimization/ -You can try to initialize MinLMState structure with FGH-function and then -use incorrect version of MinLMOptimize() (for example, version which does -not provide Hessian matrix), but it will lead to exception being thrown -after first attempt to calculate Hessian. +1. User initializes algorithm state with MinBLEICCreate() call +2. USer adds boundary and/or linear constraints by calling + MinBLEICSetBC() and MinBLEICSetLC() functions. -USAGE: -1. User initializes algorithm state with MinLMCreateFGH() call -2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and - other functions -3. User calls MinLMOptimize() function which takes algorithm state and - pointers (delegates, etc.) to callback functions. -4. User calls MinLMResults() to get solution -5. Optionally, user may call MinLMRestartFrom() to solve another problem - with same N but another starting point and/or another function. - MinLMRestartFrom() allows to reuse already initialized structure. +3. User sets stopping conditions with MinBLEICSetCond(). + +4. User calls MinBLEICOptimize() function which takes algorithm state and + pointer (delegate, etc.) to callback function which calculates F/G. +5. User calls MinBLEICResults() to get solution + +6. Optionally user may call MinBLEICRestartFrom() to solve another problem + with same N but another starting point. + MinBLEICRestartFrom() allows to reuse already initialized structure. + +NOTE: if you have box-only constraints (no general linear constraints), + then MinBC optimizer can be better option. 
It uses special, faster + constraint activation method, which performs better on problems with + multiple constraints active at the solution. + + On small-scale problems performance of MinBC is similar to that of + MinBLEIC, but on large-scale ones (hundreds and thousands of active + constraints) it can be several times faster than MinBLEIC. INPUT PARAMETERS: - N - dimension, N>1 + N - problem dimension, N>0: * if given, only leading N elements of X are used - * if not given, automatically determined from size of X - X - initial solution, array[0..N-1] + * if not given, automatically determined from size ofX + X - starting point, array[N]: + * it is better to set X to a feasible point + * but X can be infeasible, in which case algorithm will try + to find feasible point first, using X as initial + approximation. OUTPUT PARAMETERS: - State - structure which stores algorithm state - -NOTES: -1. you may tune stopping conditions with MinLMSetCond() function -2. if target function contains exp() or other fast growing functions, and - optimization algorithm makes too large steps which leads to overflow, - use MinLMSetStpMax() function to bound algorithm's steps. + State - structure stores algorithm state -- ALGLIB -- - Copyright 30.03.2009 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmcreatefgh(real_1d_array x, minlmstate& state); -void alglib::minlmcreatefgh( +
    void alglib::minbleiccreate( + real_1d_array x, + minbleicstate& state, + const xparams _params = alglib::xdefault); +void alglib::minbleiccreate( ae_int_t n, real_1d_array x, - minlmstate& state); + minbleicstate& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  [2]  

    +
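
The prototypes above pair with the other MinBLEIC calls documented below in the usual create/constrain/optimize/results order. A minimal sketch, assuming the same box-constrained quartic target used in the MinBC examples earlier; the callback name bleic_grad is illustrative:

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "optimization.h"

using namespace alglib;

// illustrative callback: f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4 and its gradient
void bleic_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    grad[0] = 400*pow(x[0]+3,3);
    grad[1] = 4*pow(x[1]-3,3);
}

int main(int argc, char **argv)
{
    real_1d_array x = "[0,0]";
    real_1d_array bndl = "[-1,-1]";
    real_1d_array bndu = "[+1,+1]";
    minbleicstate state;
    minbleicreport rep;

    minbleiccreate(x, state);                     // analytic-gradient variant
    minbleicsetbc(state, bndl, bndu);             // box constraints
    minbleicsetcond(state, 0, 0, 0.000001, 0);    // stop on small scaled step
    alglib::minbleicoptimize(state, bleic_grad);  // gradient-based overload
    minbleicresults(state, x, rep);

    printf("%s\n", x.tostring(2).c_str());        // should end up near [-1,1]
    return 0;
}
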
     
    /************************************************************************* -This is obsolete function. +The subroutine is finite difference variant of MinBLEICCreate(). It uses +finite differences in order to differentiate target function. -Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ(). +Description below contains information which is specific to this function +only. We recommend to read comments on MinBLEICCreate() in order to get +more information about creation of BLEIC optimizer. - -- ALGLIB -- - Copyright 30.03.2009 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::minlmcreatefgj( - ae_int_t m, - real_1d_array x, - minlmstate& state); -void alglib::minlmcreatefgj( - ae_int_t n, - ae_int_t m, - real_1d_array x, - minlmstate& state); +INPUT PARAMETERS: + N - problem dimension, N>0: + * if given, only leading N elements of X are used + * if not given, automatically determined from size of X + X - starting point, array[0..N-1]. + DiffStep- differentiation step, >0 -
    - -
    -
    /************************************************************************* -This function is considered obsolete since ALGLIB 3.1.0 and is present for -backward compatibility only. We recommend to use MinLMCreateVJ, which -provides similar, but more consistent and feature-rich interface. +OUTPUT PARAMETERS: + State - structure which stores algorithm state + +NOTES: +1. algorithm uses 4-point central formula for differentiation. +2. differentiation step along I-th axis is equal to DiffStep*S[I] where + S[] is scaling vector which can be set by MinBLEICSetScale() call. +3. we recommend you to use moderate values of differentiation step. Too + large step will result in too large truncation errors, while too small + step will result in too large numerical errors. 1.0E-6 can be good + value to start with. +4. Numerical differentiation is very inefficient - one gradient + calculation needs 4*N function evaluations. This function will work for + any N - either small (1...10), moderate (10...100) or large (100...). + However, performance penalty will be too severe for any N's except for + small ones. + We should also say that code which relies on numerical differentiation + is less robust and precise. CG needs exact gradient values. Imprecise + gradient may slow down convergence, especially on highly nonlinear + problems. + Thus we recommend to use this function for fast prototyping on small- + dimensional problems only, and to implement analytical gradient as soon + as possible. -- ALGLIB -- - Copyright 30.03.2009 by Bochkanov Sergey + Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmcreatefj( - ae_int_t m, +
    void alglib::minbleiccreatef( real_1d_array x, - minlmstate& state); -void alglib::minlmcreatefj( + double diffstep, + minbleicstate& state, + const xparams _params = alglib::xdefault); +void alglib::minbleiccreatef( ae_int_t n, - ae_int_t m, real_1d_array x, - minlmstate& state); + double diffstep, + minbleicstate& state, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
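
A minimal sketch of the finite-difference variant, assuming the same target as above; note that minbleiccreatef() must be paired with the function-only overload of minbleicoptimize() (see the compatibility table further below):

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "optimization.h"

using namespace alglib;

// illustrative function-only callback: f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4, no gradient
void bleic_func(const real_1d_array &x, double &func, void *ptr)
{
    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
}

int main(int argc, char **argv)
{
    real_1d_array x = "[0,0]";
    real_1d_array bndl = "[-1,-1]";
    real_1d_array bndu = "[+1,+1]";
    double diffstep = 1.0e-6;                     // moderate step, as recommended above
    minbleicstate state;
    minbleicreport rep;

    minbleiccreatef(x, diffstep, state);          // gradient obtained by finite differences
    minbleicsetbc(state, bndl, bndu);
    minbleicsetcond(state, 0, 0, 0.000001, 0);
    alglib::minbleicoptimize(state, bleic_func);  // function-only overload matches CreateF
    minbleicresults(state, x, rep);

    printf("%s\n", x.tostring(2).c_str());
    return 0;
}
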
     
    /************************************************************************* - IMPROVED LEVENBERG-MARQUARDT METHOD FOR - NON-LINEAR LEAST SQUARES OPTIMIZATION - -DESCRIPTION: -This function is used to find minimum of function which is represented as -sum of squares: - F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) -using value of function vector f[] only. Finite differences are used to -calculate Jacobian. +This function activates/deactivates verification of the user-supplied +analytic gradient. +Upon activation of this option OptGuard integrity checker performs +numerical differentiation of your target function at the initial point +(note: future versions may also perform check at the final point) and +compares numerical gradient with analytic one provided by you. -REQUIREMENTS: -This algorithm will request following information during its operation: -* function vector f[] at given point X - -There are several overloaded versions of MinLMOptimize() function which -correspond to different LM-like optimization algorithms provided by this -unit. You should choose version which accepts fvec() callback. +If difference is too large, an error flag is set and optimization session +continues. After optimization session is over, you can retrieve the report +which stores both gradients and specific components highlighted as +suspicious by the OptGuard. -You can try to initialize MinLMState structure with VJ function and then -use incorrect version of MinLMOptimize() (for example, version which -works with general form function and does not accept function vector), but -it will lead to exception being thrown after first attempt to calculate -Jacobian. +The primary OptGuard report can be retrieved with minbleicoptguardresults(). +IMPORTANT: gradient check is a high-overhead option which will cost you + about 3*N additional function evaluations. In many cases it may + cost as much as the rest of the optimization session. -USAGE: -1. User initializes algorithm state with MinLMCreateV() call -2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and - other functions -3. User calls MinLMOptimize() function which takes algorithm state and - callback functions. -4. User calls MinLMResults() to get solution -5. Optionally, user may call MinLMRestartFrom() to solve another problem - with same N/M but another starting point and/or another function. - MinLMRestartFrom() allows to reuse already initialized structure. + YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO + CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. +NOTE: unlike previous incarnation of the gradient checking code, OptGuard + does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: - N - dimension, N>1 - * if given, only leading N elements of X are used - * if not given, automatically determined from size of X - M - number of functions f[i] - X - initial solution, array[0..N-1] - DiffStep- differentiation step, >0 + State - structure used to store algorithm state + TestStep - verification step used for numerical differentiation: + * TestStep=0 turns verification off + * TestStep>0 activates verification + You should carefully choose TestStep. Value which is + too large (so large that function behavior is non- + cubic at this scale) will lead to false alarms. Too + short step will result in rounding errors dominating + numerical derivative. 
-OUTPUT PARAMETERS: - State - structure which stores algorithm state + You may use different step for different parameters by + means of setting scale with minbleicsetscale(). -See also MinLMIteration, MinLMResults. +=== EXPLANATION ========================================================== -NOTES: -1. you may tune stopping conditions with MinLMSetCond() function -2. if target function contains exp() or other fast growing functions, and - optimization algorithm makes too large steps which leads to overflow, - use MinLMSetStpMax() function to bound algorithm's steps. +In order to verify gradient algorithm performs following steps: + * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], + where X[i] is i-th component of the initial point and S[i] is a scale + of i-th parameter + * F(X) is evaluated at these trial points + * we perform one more evaluation in the middle point of the interval + * we build cubic model using function values and derivatives at trial + points and we compare its prediction with actual value in the middle + point -- ALGLIB -- - Copyright 30.03.2009 by Bochkanov Sergey + Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmcreatev( - ae_int_t m, - real_1d_array x, - double diffstep, - minlmstate& state); -void alglib::minlmcreatev( - ae_int_t n, - ae_int_t m, - real_1d_array x, - double diffstep, - minlmstate& state); +
    void alglib::minbleicoptguardgradient( + minbleicstate state, + double teststep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - +
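
A short sketch of how gradient verification might be wired in, assuming a state prepared with minbleiccreate() and the illustrative bleic_grad callback from the sketch above; the flags read afterwards are those documented for minbleicoptguardresults():

#include <stdio.h>
#include "optimization.h"

using namespace alglib;

// same illustrative callback as in the minbleiccreate() sketch above
void bleic_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr);

void run_with_gradient_check(minbleicstate &state, real_1d_array &x)
{
    minbleicreport rep;
    optguardreport ogrep;

    // TestStep>0 activates verification, TestStep=0 turns it off;
    // keep it off in production code because of the 3*N extra evaluations
    minbleicoptguardgradient(state, 0.001);

    alglib::minbleicoptimize(state, bleic_grad);
    minbleicresults(state, x, rep);

    minbleicoptguardresults(state, ogrep);
    printf("%s\n", ogrep.badgradsuspected ? "bad gradient suspected" : "gradient looks consistent");
    if( ogrep.badgradsuspected )
        printf("check component %d of the gradient\n", int(ogrep.badgradvidx));
}
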
     
    /************************************************************************* -This is obsolete function. +Detailed results of the OptGuard integrity check for nonsmoothness test #0 -Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ(). +Nonsmoothness (non-C1) test #0 studies function values (not gradient!) +obtained during line searches and monitors behavior of the directional +derivative estimate. + +This test is less powerful than test #1, but it does not depend on the +gradient values and thus it is more robust against artifacts introduced by +numerical differentiation. + +Two reports are returned: +* a "strongest" one, corresponding to line search which had highest + value of the nonsmoothness indicator +* a "longest" one, corresponding to line search which had more function + evaluations, and thus is more detailed + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], f[] - arrays of length CNT which store step lengths and function + values at these points; f[i] is evaluated in x0+stp[i]*d. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +========================================================================== += SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will += see where C1 continuity is violated. +========================================================================== + +INPUT PARAMETERS: + state - algorithm state + +OUTPUT PARAMETERS: + strrep - C1 test #0 "strong" report + lngrep - C1 test #0 "long" report -- ALGLIB -- - Copyright 30.03.2009 by Bochkanov Sergey + Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmcreatevgj( - ae_int_t m, - real_1d_array x, - minlmstate& state); -void alglib::minlmcreatevgj( - ae_int_t n, - ae_int_t m, - real_1d_array x, - minlmstate& state); +
    void alglib::minbleicoptguardnonc1test0results( + minbleicstate state, + optguardnonc1test0report& strrep, + optguardnonc1test0report& lngrep, + const xparams _params = alglib::xdefault); + +
    + +
    +
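
A sketch of dumping the "long" report for plotting, assuming the report's stp/f members behave like real_1d_array (length(), operator[]); intended to be called after the optimization session is over:

#include <stdio.h>
#include "optimization.h"

using namespace alglib;

// call after minbleicoptimize()/minbleicresults() have completed
void dump_nonc1_test0(minbleicstate &state)
{
    optguardnonc1test0report strrep, lngrep;
    minbleicoptguardnonc1test0results(state, strrep, lngrep);
    if( !lngrep.positive )
        return;                                   // nothing suspicious was recorded

    // (stp,f) pairs of the "long" report - plot them to see where C1 breaks
    for(ae_int_t i=0; i<lngrep.stp.length(); i++)
        printf("%12.6f %12.6f\n", double(lngrep.stp[i]), double(lngrep.f[i]));
    printf("suspected C1 violation between steps %d and %d\n",
        int(lngrep.stpidxa), int(lngrep.stpidxb));
}
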
    /************************************************************************* +Detailed results of the OptGuard integrity check for nonsmoothness test #1 + +Nonsmoothness (non-C1) test #1 studies individual components of the +gradient computed during line search. + +When precise analytic gradient is provided this test is more powerful than +test #0 which works with function values and ignores user-provided +gradient. However, test #0 becomes more powerful when numerical +differentiation is employed (in such cases test #1 detects higher levels +of numerical noise and becomes too conservative). + +This test also tells specific components of the gradient which violate C1 +continuity, which makes it more informative than #0, which just tells that +continuity is violated. + +Two reports are returned: +* a "strongest" one, corresponding to line search which had highest + value of the nonsmoothness indicator +* a "longest" one, corresponding to line search which had more function + evaluations, and thus is more detailed + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* vidx - is an index of the variable in [0,N) with nonsmooth derivative +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], g[] - arrays of length CNT which store step lengths and gradient + values at these points; g[i] is evaluated in x0+stp[i]*d and contains + vidx-th component of the gradient. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +========================================================================== += SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will += see where C1 continuity is violated. +========================================================================== -
    - -
    -
    /************************************************************************* - IMPROVED LEVENBERG-MARQUARDT METHOD FOR - NON-LINEAR LEAST SQUARES OPTIMIZATION +INPUT PARAMETERS: + state - algorithm state -DESCRIPTION: -This function is used to find minimum of function which is represented as -sum of squares: - F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) -using value of function vector f[] and Jacobian of f[]. +OUTPUT PARAMETERS: + strrep - C1 test #1 "strong" report + lngrep - C1 test #1 "long" report + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbleicoptguardnonc1test1results( + minbleicstate state, + optguardnonc1test1report& strrep, + optguardnonc1test1report& lngrep, + const xparams _params = alglib::xdefault); + +
    + +
    +
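
A similar sketch for test #1, which additionally names the suspicious gradient component via vidx; the same real_1d_array assumption as in the test #0 sketch applies:

#include <stdio.h>
#include "optimization.h"

using namespace alglib;

// call after the optimization session is over
void dump_nonc1_test1(minbleicstate &state)
{
    optguardnonc1test1report strrep, lngrep;
    minbleicoptguardnonc1test1results(state, strrep, lngrep);
    if( !strrep.positive )
        return;

    // vidx names the gradient component whose C1 continuity is suspected
    printf("component g[%d], suspected between steps %d and %d\n",
        int(strrep.vidx), int(strrep.stpidxa), int(strrep.stpidxb));
    for(ae_int_t i=0; i<strrep.stp.length(); i++)
        printf("%12.6f %12.6f\n", double(strrep.stp[i]), double(strrep.g[i]));
}
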
    /************************************************************************* +Results of OptGuard integrity check, should be called after optimization +session is over. + +=== PRIMARY REPORT ======================================================= + +OptGuard performs several checks which are intended to catch common errors +in the implementation of nonlinear function/gradient: +* incorrect analytic gradient +* discontinuous (non-C0) target functions (constraints) +* nonsmooth (non-C1) target functions (constraints) + +Each of these checks is activated with appropriate function: +* minbleicoptguardgradient() for gradient verification +* minbleicoptguardsmoothness() for C0/C1 checks + +Following flags are set when these errors are suspected: +* rep.badgradsuspected, and additionally: + * rep.badgradvidx for specific variable (gradient element) suspected + * rep.badgradxbase, a point where gradient is tested + * rep.badgraduser, user-provided gradient (stored as 2D matrix with + single row in order to make report structure compatible with more + complex optimizers like MinNLC or MinLM) + * rep.badgradnum, reference gradient obtained via numerical + differentiation (stored as 2D matrix with single row in order to make + report structure compatible with more complex optimizers like MinNLC + or MinLM) +* rep.nonc0suspected +* rep.nonc1suspected + +=== ADDITIONAL REPORTS/LOGS ============================================== + +Several different tests are performed to catch C0/C1 errors, you can find +out specific test signaled error by looking to: +* rep.nonc0test0positive, for non-C0 test #0 +* rep.nonc1test0positive, for non-C1 test #0 +* rep.nonc1test1positive, for non-C1 test #1 + +Additional information (including line search logs) can be obtained by +means of: +* minbleicoptguardnonc1test0results() +* minbleicoptguardnonc1test1results() +which return detailed error reports, specific points where discontinuities +were found, and so on. -REQUIREMENTS: -This algorithm will request following information during its operation: +========================================================================== -* function vector f[] at given point X -* function vector f[] and Jacobian of f[] (simultaneously) at given point +INPUT PARAMETERS: + state - algorithm state -There are several overloaded versions of MinLMOptimize() function which -correspond to different LM-like optimization algorithms provided by this -unit. You should choose version which accepts fvec() and jac() callbacks. -First one is used to calculate f[] at given point, second one calculates -f[] and Jacobian df[i]/dx[j]. +OUTPUT PARAMETERS: + rep - generic OptGuard report; more detailed reports can be + retrieved with other functions. -You can try to initialize MinLMState structure with VJ function and then -use incorrect version of MinLMOptimize() (for example, version which -works with general form function and does not provide Jacobian), but it -will lead to exception being thrown after first attempt to calculate -Jacobian. +NOTE: false negatives (nonsmooth problems are not identified as nonsmooth + ones) are possible although unlikely. + The reason is that you need to make several evaluations around + nonsmoothness in order to accumulate enough information about + function curvature. Say, if you start right from the nonsmooth point, + optimizer simply won't get enough data to understand what is going + wrong before it terminates due to abrupt changes in the derivative. 
+ It is also possible that "unlucky" step will move us to the + termination too quickly. -USAGE: -1. User initializes algorithm state with MinLMCreateVJ() call -2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and - other functions -3. User calls MinLMOptimize() function which takes algorithm state and - callback functions. -4. User calls MinLMResults() to get solution -5. Optionally, user may call MinLMRestartFrom() to solve another problem - with same N/M but another starting point and/or another function. - MinLMRestartFrom() allows to reuse already initialized structure. + Our current approach is to have less than 0.1% false negatives in + our test examples (measured with multiple restarts from random + points), and to have exactly 0% false positives. + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbleicoptguardresults( + minbleicstate state, + optguardreport& rep, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS: - N - dimension, N>1 - * if given, only leading N elements of X are used - * if not given, automatically determined from size of X - M - number of functions f[i] - X - initial solution, array[0..N-1] +
    + +
    +
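
A sketch that simply prints the primary report flags listed above once the session is over:

#include <stdio.h>
#include "optimization.h"

using namespace alglib;

// call after the optimization session is over
void print_optguard_summary(minbleicstate &state)
{
    optguardreport rep;
    minbleicoptguardresults(state, rep);

    printf("bad gradient suspected:  %s\n", rep.badgradsuspected ? "true" : "false");
    printf("non-C0 target suspected: %s\n", rep.nonc0suspected ? "true" : "false");
    printf("non-C1 target suspected: %s\n", rep.nonc1suspected ? "true" : "false");

    // which specific test fired (details via the test-report functions above)
    printf("non-C0 test #0 positive: %s\n", rep.nonc0test0positive ? "true" : "false");
    printf("non-C1 test #0 positive: %s\n", rep.nonc1test0positive ? "true" : "false");
    printf("non-C1 test #1 positive: %s\n", rep.nonc1test1positive ? "true" : "false");
}
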
    /************************************************************************* +This function activates/deactivates nonsmoothness monitoring option of +the OptGuard integrity checker. Smoothness monitor silently observes +solution process and tries to detect ill-posed problems, i.e. ones with: +a) discontinuous target function (non-C0) +b) nonsmooth target function (non-C1) -OUTPUT PARAMETERS: - State - structure which stores algorithm state +Smoothness monitoring does NOT interrupt optimization even if it suspects +that your problem is nonsmooth. It just sets corresponding flags in the +OptGuard report which can be retrieved after optimization is over. -NOTES: -1. you may tune stopping conditions with MinLMSetCond() function -2. if target function contains exp() or other fast growing functions, and - optimization algorithm makes too large steps which leads to overflow, - use MinLMSetStpMax() function to bound algorithm's steps. +Smoothness monitoring is a moderate overhead option which often adds less +than 1% to the optimizer running time. Thus, you can use it even for large +scale problems. + +NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 + continuity violations. + + First, minor errors are hard to catch - say, a 0.0001 difference in + the model values at two sides of the gap may be due to discontinuity + of the model - or simply because the model has changed. + + Second, C1-violations are especially difficult to detect in a + noninvasive way. The optimizer usually performs very short steps + near the nonsmoothness, and differentiation usually introduces a + lot of numerical noise. It is hard to tell whether some tiny + discontinuity in the slope is due to real nonsmoothness or just due + to numerical noise alone. + + Our top priority was to avoid false positives, so in some rare cases + minor errors may went unnoticed (however, in most cases they can be + spotted with restart from different initial point). + +INPUT PARAMETERS: + state - algorithm state + level - monitoring level: + * 0 - monitoring is disabled + * 1 - noninvasive low-overhead monitoring; function values + and/or gradients are recorded, but OptGuard does not + try to perform additional evaluations in order to + get more information about suspicious locations. + +=== EXPLANATION ========================================================== + +One major source of headache during optimization is the possibility of +the coding errors in the target function/constraints (or their gradients). +Such errors most often manifest themselves as discontinuity or +nonsmoothness of the target/constraints. + +Another frequent situation is when you try to optimize something involving +lots of min() and max() operations, i.e. nonsmooth target. Although not a +coding error, it is nonsmoothness anyway - and smooth optimizers usually +stop right after encountering nonsmoothness, well before reaching solution. + +OptGuard integrity checker helps you to catch such situations: it monitors +function values/gradients being passed to the optimizer and tries to +errors. Upon discovering suspicious pair of points it raises appropriate +flag (and allows you to continue optimization). When optimization is done, +you can study OptGuard result. -- ALGLIB -- - Copyright 30.03.2009 by Bochkanov Sergey + Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmcreatevj( - ae_int_t m, - real_1d_array x, - minlmstate& state); -void alglib::minlmcreatevj( - ae_int_t n, - ae_int_t m, - real_1d_array x, - minlmstate& state); +
    void alglib::minbleicoptguardsmoothness( + minbleicstate state, + const xparams _params = alglib::xdefault); +void alglib::minbleicoptguardsmoothness( + minbleicstate state, + ae_int_t level, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* This family of functions is used to launcn iterations of nonlinear optimizer @@ -25487,12 +26988,6 @@ value func at given point x grad - callback which calculates function (or merit function) value func and gradient grad at given point x - hess - callback which calculates function (or merit function) - value func, gradient grad and Hessian hess at given point x - fvec - callback which calculates function vector fi[] - at given point x - jac - callback which calculates function vector fi[] - and Jacobian jac at given point x rep - optional callback which is called after each iteration can be NULL ptr - optional pointer which is passed to func/grad/hess/jac/rep @@ -25500,51 +26995,52 @@ NOTES: -1. Depending on function used to create state structure, this algorithm - may accept Jacobian and/or Hessian and/or gradient. According to the - said above, there ase several versions of this function, which accept - different sets of callbacks. +1. This function has two different implementations: one which uses exact + (analytical) user-supplied gradient, and one which uses function value + only and numerically differentiates function in order to obtain + gradient. - This flexibility opens way to subtle errors - you may create state with - MinLMCreateFGH() (optimization using Hessian), but call function which - does not accept Hessian. So when algorithm will request Hessian, there - will be no callback to call. In this case exception will be thrown. + Depending on the specific function used to create optimizer object + (either MinBLEICCreate() for analytical gradient or MinBLEICCreateF() + for numerical differentiation) you should choose appropriate variant of + MinBLEICOptimize() - one which accepts function AND gradient or one + which accepts function ONLY. - Be careful to avoid such errors because there is no way to find them at - compile time - you can see them at runtime only. + Be careful to choose variant of MinBLEICOptimize() which corresponds to + your optimization scheme! Table below lists different combinations of + callback (function/gradient) passed to MinBLEICOptimize() and specific + function used to create optimizer. + + + | USER PASSED TO MinBLEICOptimize() + CREATED WITH | function only | function and gradient + ------------------------------------------------------------ + MinBLEICCreateF() | work FAIL + MinBLEICCreate() | FAIL work + + Here "FAIL" denotes inappropriate combinations of optimizer creation + function and MinBLEICOptimize() version. Attemps to use such + combination (for example, to create optimizer with MinBLEICCreateF() + and to pass gradient information to MinBLEICOptimize()) will lead to + exception being thrown. Either you did not pass gradient when it WAS + needed or you passed gradient when it was NOT needed. -- ALGLIB -- - Copyright 10.03.2009 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void minlmoptimize(minlmstate &state, - void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), - void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); -void minlmoptimize(minlmstate &state, - void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), - void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), - void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); -void minlmoptimize(minlmstate &state, - void (*func)(const real_1d_array &x, double &func, void *ptr), - void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), - void (*hess)(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr), - void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); -void minlmoptimize(minlmstate &state, +
    void minbleicoptimize(minbleicstate &state, void (*func)(const real_1d_array &x, double &func, void *ptr), - void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); -void minlmoptimize(minlmstate &state, - void (*func)(const real_1d_array &x, double &func, void *ptr), + void *ptr = NULL, + const xparams _xparams = alglib::xdefault); +void minbleicoptimize(minbleicstate &state, void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), - void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); + void *ptr = NULL, + const xparams _xparams = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +

    Examples:   [1]  [2]  [3]  

    +
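
The prototypes above also accept an optional per-iteration callback and a user pointer. A sketch of using them for progress logging; it assumes that minbleicsetxrep() (listed in the function index above but not reproduced in this excerpt) takes (state, bool) and enables the reports, and it reuses the illustrative bleic_grad callback:

#include <stdio.h>
#include "optimization.h"

using namespace alglib;

// same illustrative callback as in the minbleiccreate() sketch above
void bleic_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr);

// called after each iteration once xrep-reporting is enabled
void log_progress(const real_1d_array &x, double func, void *ptr)
{
    int *niter = (int*)ptr;                       // user data passed through 'ptr'
    printf("iter %d: f=%.6f at %s\n", (*niter)++, func, x.tostring(3).c_str());
}

void optimize_with_logging(minbleicstate &state, real_1d_array &x)
{
    minbleicreport rep;
    int niter = 0;

    minbleicsetxrep(state, true);                 // assumption: enables per-iteration reports
    alglib::minbleicoptimize(state, bleic_grad, log_progress, &niter);
    minbleicresults(state, x, rep);
}
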
     
    /************************************************************************* This subroutine submits request for termination of running optimizer. It @@ -25570,178 +27066,143 @@ -- ALGLIB -- Copyright 08.10.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmrequesttermination(minlmstate state); +
    void alglib::minbleicrequesttermination( + minbleicstate state, + const xparams _params = alglib::xdefault);
    - +
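
A sketch of submitting the request from the per-iteration callback to enforce an iteration budget, with the state object routed through the ptr argument; per the results description below, the solver then finishes with completion code 8. The minbleicsetxrep() call is the same assumption as in the logging sketch above:

#include <stdio.h>
#include "optimization.h"

using namespace alglib;

// same illustrative callback as in the minbleiccreate() sketch above
void bleic_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr);

// stop the solver once an iteration budget is exhausted
// (a static counter keeps the sketch short; 'ptr' carries the state object)
void budget_rep(const real_1d_array &x, double func, void *ptr)
{
    static int iters = 0;
    minbleicstate *state = (minbleicstate*)ptr;
    if( ++iters >= 50 )
        minbleicrequesttermination(*state);       // solver returns with terminationtype=8
}

void optimize_with_budget(minbleicstate &state, real_1d_array &x)
{
    minbleicreport rep;
    minbleicsetxrep(state, true);                 // assumption: turn on per-iteration reports
    alglib::minbleicoptimize(state, bleic_grad, budget_rep, &state);
    minbleicresults(state, x, rep);
    printf("%d\n", int(rep.terminationtype));     // 8 when stopped by the request
}
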
     
    /************************************************************************* -This subroutine restarts LM algorithm from new point. All optimization -parameters are left unchanged. +This subroutine restarts algorithm from new point. +All optimization parameters (including constraints) are left unchanged. This function allows to solve multiple optimization problems (which -must have same number of dimensions) without object reallocation penalty. +must have same number of dimensions) without object reallocation penalty. INPUT PARAMETERS: - State - structure used for reverse communication previously - allocated with MinLMCreateXXX call. + State - structure previously allocated with MinBLEICCreate call. X - new starting point. -- ALGLIB -- - Copyright 30.07.2010 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmrestartfrom(minlmstate state, real_1d_array x); +
    void alglib::minbleicrestartfrom( + minbleicstate state, + real_1d_array x, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
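
A sketch of reusing one configured state for several starting points, assuming it was set up as in the minbleiccreate() sketch above; constraints and stopping conditions are preserved across the restart:

#include <stdio.h>
#include "optimization.h"

using namespace alglib;

// same illustrative callback as in the minbleiccreate() sketch above
void bleic_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr);

void solve_from_two_points(minbleicstate &state)
{
    real_1d_array x;
    minbleicreport rep;

    // first solve: state already holds the point passed to minbleiccreate()
    alglib::minbleicoptimize(state, bleic_grad);
    minbleicresults(state, x, rep);
    printf("%s\n", x.tostring(2).c_str());

    // second solve from a new point; constraints and stopping conditions are kept
    x = "[0.5,-0.5]";
    minbleicrestartfrom(state, x);
    alglib::minbleicoptimize(state, bleic_grad);
    minbleicresults(state, x, rep);
    printf("%s\n", x.tostring(2).c_str());
}
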
     
    /************************************************************************* -Levenberg-Marquardt algorithm results +BLEIC results INPUT PARAMETERS: State - algorithm state OUTPUT PARAMETERS: X - array[0..N-1], solution - Rep - optimization report; includes termination codes and - additional information. Termination codes are listed below, - see comments for this structure for more info. - Termination code is stored in rep.terminationtype field: - * -7 derivative correctness check failed; - see rep.funcidx, rep.varidx for - more information. - * -3 constraints are inconsistent - * 1 relative function improvement is no more than - EpsF. - * 2 relative step is no more than EpsX. - * 4 gradient is no more than EpsG. - * 5 MaxIts steps was taken - * 7 stopping conditions are too stringent, - further improvement is impossible - * 8 terminated by user who called minlmrequesttermination(). - X contains point which was "current accepted" when - termination request was submitted. + Rep - optimization report. You should check Rep.TerminationType + in order to distinguish successful termination from + unsuccessful one: + * -8 internal integrity control detected infinite or + NAN values in function/gradient. Abnormal + termination signalled. + * -3 inconsistent constraints. Feasible point is + either nonexistent or too hard to find. Try to + restart optimizer with better initial approximation + * 1 relative function improvement is no more than EpsF. + * 2 scaled step is no more than EpsX. + * 4 scaled gradient norm is no more than EpsG. + * 5 MaxIts steps was taken + * 8 terminated by user who called minbleicrequesttermination(). + X contains point which was "current accepted" when + termination request was submitted. + More information about fields of this structure can be + found in the comments on MinBLEICReport datatype. -- ALGLIB -- - Copyright 10.03.2009 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmresults( - minlmstate state, +
    void alglib::minbleicresults( + minbleicstate state, real_1d_array& x, - minlmreport& rep); + minbleicreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +

    Examples:   [1]  [2]  [3]  

    +
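
A small helper that maps the completion codes listed above to messages; field names follow the minbleicreport structure shown earlier:

#include <stdio.h>
#include "optimization.h"

using namespace alglib;

void report_outcome(const minbleicreport &rep)
{
    printf("iterations: %d, completion code: %d\n",
        int(rep.iterationscount), int(rep.terminationtype));
    switch(int(rep.terminationtype))
    {
    case -8: printf("infinite/NAN values in function or gradient\n"); break;
    case -3: printf("inconsistent constraints\n"); break;
    case  1: printf("relative function improvement below EpsF\n"); break;
    case  2: printf("scaled step below EpsX\n"); break;
    case  4: printf("scaled gradient norm below EpsG\n"); break;
    case  5: printf("MaxIts iterations taken\n"); break;
    case  8: printf("stopped by minbleicrequesttermination()\n"); break;
    default: printf("see the MinBLEICReport comments for other codes\n"); break;
    }
}
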
     
    /************************************************************************* -Levenberg-Marquardt algorithm results +BLEIC results -Buffered implementation of MinLMResults(), which uses pre-allocated buffer +Buffered implementation of MinBLEICResults() which uses pre-allocated buffer to store X[]. If buffer size is too small, it resizes buffer. It is intended to be used in the inner cycles of performance critical algorithms where array reallocation penalty is too large to be ignored. -- ALGLIB -- - Copyright 10.03.2009 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmresultsbuf( - minlmstate state, +
    void alglib::minbleicresultsbuf( + minbleicstate state, real_1d_array& x, - minlmreport& rep); - -
    - -
    -
    /************************************************************************* -This function is used to change acceleration settings - -You can choose between three acceleration strategies: -* AccType=0, no acceleration. -* AccType=1, secant updates are used to update quadratic model after each - iteration. After fixed number of iterations (or after model breakdown) - we recalculate quadratic model using analytic Jacobian or finite - differences. Number of secant-based iterations depends on optimization - settings: about 3 iterations - when we have analytic Jacobian, up to 2*N - iterations - when we use finite differences to calculate Jacobian. - -AccType=1 is recommended when Jacobian calculation cost is prohibitive -high (several Mx1 function vector calculations followed by several NxN -Cholesky factorizations are faster than calculation of one M*N Jacobian). -It should also be used when we have no Jacobian, because finite difference -approximation takes too much time to compute. - -Table below list optimization protocols (XYZ protocol corresponds to -MinLMCreateXYZ) and acceleration types they support (and use by default). - -ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS: - -protocol 0 1 comment -V + + -VJ + + -FGH + - -DAFAULT VALUES: - -protocol 0 1 comment -V x without acceleration it is so slooooooooow -VJ x -FGH x - -NOTE: this function should be called before optimization. Attempt to call -it during algorithm iterations may result in unexpected behavior. - -NOTE: attempt to call this function with unsupported protocol/acceleration -combination will result in exception being thrown. - - -- ALGLIB -- - Copyright 14.10.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::minlmsetacctype(minlmstate state, ae_int_t acctype); + minbleicreport& rep, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets boundary constraints for LM optimizer +This function sets boundary constraints for BLEIC optimizer. Boundary constraints are inactive by default (after initial creation). -They are preserved until explicitly turned off with another SetBC() call. +They are preserved after algorithm restart with MinBLEICRestartFrom(). + +NOTE: if you have box-only constraints (no general linear constraints), + then MinBC optimizer can be better option. It uses special, faster + constraint activation method, which performs better on problems with + multiple constraints active at the solution. + + On small-scale problems performance of MinBC is similar to that of + MinBLEIC, but on large-scale ones (hundreds and thousands of active + constraints) it can be several times faster than MinBLEIC. INPUT PARAMETERS: State - structure stores algorithm state BndL - lower bounds, array[N]. If some (all) variables are unbounded, you may specify - very small number or -INF (latter is recommended because - it will allow solver to use better algorithm). + very small number or -INF. BndU - upper bounds, array[N]. If some (all) variables are unbounded, you may specify - very large number or +INF (latter is recommended because - it will allow solver to use better algorithm). + very large number or +INF. NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th variable will be "frozen" at X[i]=BndL[i]=BndU[i]. NOTE 2: this solver has following useful properties: * bound constraints are always satisfied exactly -* function is evaluated only INSIDE area specified by bound constraints - or at its boundary +* function is evaluated only INSIDE area specified by bound constraints, + even when numerical differentiation is used (algorithm adjusts nodes + according to boundary constraints) -- ALGLIB -- - Copyright 14.01.2011 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmsetbc( - minlmstate state, +
    void alglib::minbleicsetbc( + minbleicstate state, real_1d_array bndl, - real_1d_array bndu); + real_1d_array bndu, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
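
A sketch of the call, including the documented trick of freezing a variable by giving it equal lower and upper bounds; the numbers are illustrative:

#include "optimization.h"

using namespace alglib;

void set_box_constraints(minbleicstate &state)
{
    // -1 <= x0 <= +1, and x1 frozen at 0.5 (BndL[i]=BndU[i], see NOTE 1 above)
    real_1d_array bndl = "[-1.0,0.5]";
    real_1d_array bndu = "[+1.0,0.5]";
    minbleicsetbc(state, bndl, bndu);
}
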
     
    /************************************************************************* -This function sets stopping conditions for Levenberg-Marquardt optimization -algorithm. +This function sets stopping conditions for the optimizer. INPUT PARAMETERS: State - structure which stores algorithm state @@ -25751,7 +27212,7 @@ * |.| means Euclidian norm * v - scaled gradient vector, v[i]=g[i]*s[i] * g - gradient - * s - scaling coefficients set by MinLMSetScale() + * s - scaling coefficients set by MinBLEICSetScale() EpsF - >=0 The subroutine finishes its work if on k+1-th iteration the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} @@ -25761,86 +27222,156 @@ the condition |v|<=EpsX is fulfilled, where: * |.| means Euclidian norm * v - scaled step vector, v[i]=dx[i]/s[i] - * dx - ste pvector, dx=X(k+1)-X(k) - * s - scaling coefficients set by MinLMSetScale() + * dx - step vector, dx=X(k+1)-X(k) + * s - scaling coefficients set by MinBLEICSetScale() MaxIts - maximum number of iterations. If MaxIts=0, the number of - iterations is unlimited. Only Levenberg-Marquardt - iterations are counted (L-BFGS/CG iterations are NOT - counted because their cost is very low compared to that of - LM). + iterations is unlimited. -Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to -automatic stopping criterion selection (small EpsX). +Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead +to automatic stopping criterion selection. + +NOTE: when SetCond() called with non-zero MaxIts, BLEIC solver may perform + slightly more than MaxIts iterations. I.e., MaxIts sets non-strict + limit on iterations count. -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmsetcond( - minlmstate state, +
    void alglib::minbleicsetcond( + minbleicstate state, double epsg, double epsf, double epsx, - ae_int_t maxits); + ae_int_t maxits, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +

    Examples:   [1]  [2]  [3]  

    +
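
A sketch of the stopping criteria described above; the EpsX-based condition matches the MinBC examples at the top of this file, and passing four zeros selects the automatic criterion:

#include "optimization.h"

using namespace alglib;

void set_stopping_conditions(minbleicstate &state)
{
    double epsg = 0;            // gradient-based test disabled
    double epsf = 0;            // function-improvement test disabled
    double epsx = 0.000001;     // stop when the scaled step becomes shorter than this
    ae_int_t maxits = 0;        // 0 = unlimited iterations (non-strict limit otherwise)
    minbleicsetcond(state, epsg, epsf, epsx, maxits);

    // minbleicsetcond(state, 0, 0, 0, 0) would select the stopping criterion automatically
}
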
     
    /************************************************************************* -This subroutine turns on verification of the user-supplied analytic -gradient: -* user calls this subroutine before optimization begins -* MinLMOptimize() is called -* prior to actual optimization, for each function Fi and each component - of parameters being optimized X[j] algorithm performs following steps: - * two trial steps are made to X[j]-TestStep*S[j] and X[j]+TestStep*S[j], - where X[j] is j-th parameter and S[j] is a scale of j-th parameter - * if needed, steps are bounded with respect to constraints on X[] - * Fi(X) is evaluated at these trial points - * we perform one more evaluation in the middle point of the interval - * we build cubic model using function values and derivatives at trial - points and we compare its prediction with actual value in the middle - point - * in case difference between prediction and actual value is higher than - some predetermined threshold, algorithm stops with completion code -7; - Rep.VarIdx is set to index of the parameter with incorrect derivative, - Rep.FuncIdx is set to index of the function. -* after verification is over, algorithm proceeds to the actual optimization. +This function sets linear constraints for BLEIC optimizer. -NOTE 1: verification needs N (parameters count) Jacobian evaluations. It - is very costly and you should use it only for low dimensional - problems, when you want to be sure that you've correctly - calculated analytic derivatives. You should not use it in the - production code (unless you want to check derivatives provided - by some third party). +Linear constraints are inactive by default (after initial creation). +They are preserved after algorithm restart with MinBLEICRestartFrom(). -NOTE 2: you should carefully choose TestStep. Value which is too large - (so large that function behaviour is significantly non-cubic) will - lead to false alarms. You may use different step for different - parameters by means of setting scale with MinLMSetScale(). +INPUT PARAMETERS: + State - structure previously allocated with MinBLEICCreate call. + C - linear constraints, array[K,N+1]. + Each row of C represents one constraint, either equality + or inequality (see below): + * first N elements correspond to coefficients, + * last element corresponds to the right part. + All elements of C (including right part) must be finite. + CT - type of constraints, array[K]: + * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n] + * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n] + * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n] + K - number of equality/inequality constraints, K>=0: + * if given, only leading K elements of C/CT are used + * if not given, automatically determined from sizes of C/CT -NOTE 3: this function may lead to false positives. In case it reports that - I-th derivative was calculated incorrectly, you may decrease test - step and try one more time - maybe your function changes too - sharply and your step is too large for such rapidly chanding - function. +NOTE 1: linear (non-bound) constraints are satisfied only approximately: +* there always exists some minor violation (about Epsilon in magnitude) + due to rounding errors +* numerical differentiation, if used, may lead to function evaluations + outside of the feasible area, because algorithm does NOT change + numerical differentiation formula according to linear constraints. 
+If you want constraints to be satisfied exactly, try to reformulate your +problem in such manner that all constraints will become boundary ones +(this kind of constraints is always satisfied exactly, both in the final +solution and in all intermediate points). + + -- ALGLIB -- + Copyright 28.11.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbleicsetlc( + minbleicstate state, + real_2d_array c, + integer_1d_array ct, + const xparams _params = alglib::xdefault); +void alglib::minbleicsetlc( + minbleicstate state, + real_2d_array c, + integer_1d_array ct, + ae_int_t k, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  
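As a quick sketch of the encoding described above (using the same x>=2 and x+y>=6 constraints as the inequality-constrained example program later in this section, and assuming state was created with minbleiccreate()):

    // each row of c holds [coefficients..., right part]; ct gives the constraint sense
    //   1*x + 0*y >= 2   ->   row [1,0,2], ct[0] = +1
    //   1*x + 1*y >= 6   ->   row [1,1,6], ct[1] = +1
    real_2d_array c = "[[1,0,2],[1,1,6]]";
    integer_1d_array ct = "[1,1]";
    minbleicsetlc(state, c, ct);    // K is taken from the sizes of c/ct

The two-argument form lets the optimizer deduce K from the array sizes; pass K explicitly only when just the leading rows of a larger array should be used.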

    + +
    +
    /************************************************************************* +Modification of the preconditioner: preconditioning is turned off. INPUT PARAMETERS: - State - structure used to store algorithm state - TestStep - verification step: - * TestStep=0 turns verification off - * TestStep>0 activates verification + State - structure which stores algorithm state -- ALGLIB -- - Copyright 15.06.2012 by Bochkanov Sergey + Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmsetgradientcheck(minlmstate state, double teststep); +
    void alglib::minbleicsetprecdefault( + minbleicstate state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets scaling coefficients for LM optimizer. +Modification of the preconditioner: diagonal of approximate Hessian is +used. + +INPUT PARAMETERS: + State - structure which stores algorithm state + D - diagonal of the approximate Hessian, array[0..N-1], + (if larger, only leading N elements are used). + +NOTE 1: D[i] should be positive. Exception will be thrown otherwise. + +NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. + + -- ALGLIB -- + Copyright 13.10.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbleicsetprecdiag( + minbleicstate state, + real_1d_array d, + const xparams _params = alglib::xdefault); + +
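For illustration only, a sketch with the Hessian diagonal of the test function f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4 (used throughout the examples in this section) evaluated at x=[0,0]; state is assumed to come from a preceding minbleiccreate() call:

    // d2f/dx0^2 = 1200*(x0+3)^2 = 10800 at x0=0
    // d2f/dx1^2 =   12*(x1-3)^2 =   108 at x1=0
    real_1d_array d = "[10800,108]";
    minbleicsetprecdiag(state, d);    // pass the diagonal itself, NOT its inverse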
    + +
    +
    /************************************************************************* +Modification of the preconditioner: scale-based diagonal preconditioning. + +This preconditioning mode can be useful when you don't have approximate +diagonal of Hessian, but you know that your variables are badly scaled +(for example, one variable is in [1,10], and another in [1000,100000]), +and most of the ill-conditioning comes from the different scales of the variables. + +In this case a simple scale-based preconditioner, with H[i] = 1/(s[i]^2), +can greatly improve convergence. + +IMPORTANT: you should set scale of your variables with MinBLEICSetScale() +call (before or after MinBLEICSetPrecScale() call). Without knowledge of +the scale of your variables, the scale-based preconditioner will be just a unit +matrix. + +INPUT PARAMETERS: + State - structure which stores algorithm state + + -- ALGLIB -- + Copyright 13.10.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbleicsetprecscale( + minbleicstate state, + const xparams _params = alglib::xdefault); + +
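A short sketch of the intended call sequence, with made-up scales matching the badly scaled situation described above; state is assumed to come from minbleiccreate():

    // one variable of magnitude ~10, another of magnitude ~100000
    real_1d_array s = "[10,100000]";
    minbleicsetscale(state, s);     // required: without it the scale-based
                                    // preconditioner is just a unit matrix
    minbleicsetprecscale(state);    // turns on H[i] = 1/(s[i]^2) preconditioning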
    + +
    +
    /************************************************************************* +This function sets scaling coefficients for BLEIC optimizer. ALGLIB optimizers use scaling matrices to test stopping conditions (step size and gradient are scaled before comparison with tolerances). Scale of @@ -25848,15 +27379,19 @@ a) "how large" the variable is b) how large the step should be to make significant changes in the function -Generally, scale is NOT considered to be a form of preconditioner. But LM -optimizer is unique in that it uses scaling matrix both in the stopping -condition tests and as Marquardt damping factor. +Scaling is also used by finite difference variant of the optimizer - step +along I-th axis is equal to DiffStep*S[I]. -Proper scaling is very important for the algorithm performance. It is less -important for the quality of results, but still has some influence (it is -easier to converge when variables are properly scaled, so premature -stopping is possible when very badly scalled variables are combined with -relaxed stopping conditions). +In most optimizers (and in the BLEIC too) scaling is NOT a form of +preconditioning. It just affects stopping conditions. You should set +preconditioner by separate call to one of the MinBLEICSetPrec...() +functions. + +There is a special preconditioning mode, however, which uses scaling +coefficients to form diagonal preconditioning matrix. You can turn this +mode on, if you want. But you should understand that scaling is not the +same thing as preconditioning - these are two different, although related +forms of tuning solver. INPUT PARAMETERS: State - structure stores algorithm state @@ -25866,14 +27401,25 @@ -- ALGLIB -- Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmsetscale(minlmstate state, real_1d_array s); +
    void alglib::minbleicsetscale( + minbleicstate state, + real_1d_array s, + const xparams _params = alglib::xdefault);
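For example (a sketch with made-up magnitudes), if the first variable is of order 1 and the second of order 10000:

    real_1d_array s = "[1,10000]";
    minbleicsetscale(state, s);
    // with minbleiccreatef(), the finite difference step along the
    // i-th axis now becomes DiffStep*S[i]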
    - +
     
    /************************************************************************* This function sets maximum step length +IMPORTANT: this feature is hard to combine with preconditioning. You can't +set upper limit on step length, when you solve optimization problem with +linear (non-boundary) constraints AND preconditioner turned on. + +When non-boundary constraints are present, you have to either a) use +preconditioner, or b) use upper limit on step length. YOU CAN'T USE BOTH! +In this case algorithm will terminate with appropriate error code. + INPUT PARAMETERS: State - structure which stores algorithm state StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't @@ -25881,21 +27427,20 @@ Use this subroutine when you optimize target function which contains exp() or other fast growing functions, and optimization algorithm makes too -large steps which leads to overflow. This function allows us to reject +large steps which lead to overflow. This function allows us to reject steps that are too large (and therefore expose us to the possible overflow) without actually calculating function value at the x+stp*d. -NOTE: non-zero StpMax leads to moderate performance degradation because -intermediate step of preconditioned L-BFGS optimization is incompatible -with limits on step size. - -- ALGLIB -- Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmsetstpmax(minlmstate state, double stpmax); +
    void alglib::minbleicsetstpmax( + minbleicstate state, + double stpmax, + const xparams _params = alglib::xdefault);
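A minimal sketch, assuming a state created with minbleiccreate(); the limit itself is arbitrary and problem-dependent:

    minbleicsetstpmax(state, 10.0);    // never try steps longer than 10.0,
                                       // e.g. to keep exp() in the target from overflowing
    // minbleicsetstpmax(state, 0.0) would remove the limit again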
    - +
     
    /************************************************************************* This function turns on/off reporting. @@ -25905,16 +27450,18 @@ NeedXRep- whether iteration reports are needed or not If NeedXRep is True, algorithm will call rep() callback function if it is -provided to MinLMOptimize(). Both Levenberg-Marquardt and internal L-BFGS -iterations are reported. +provided to MinBLEICOptimize(). -- ALGLIB -- - Copyright 02.04.2010 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minlmsetxrep(minlmstate state, bool needxrep); +
    void alglib::minbleicsetxrep( + minbleicstate state, + bool needxrep, + const xparams _params = alglib::xdefault);
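A sketch of consuming the iteration reports; the rep callback is assumed to have the same (const real_1d_array &x, double func, void *ptr) signature that this manual shows for the other optimizers, and function1_grad is the gradient callback from the examples below:

void print_progress(const real_1d_array &x, double func, void *ptr)
{
    // called by the optimizer once per iteration when reporting is enabled
    printf("f = %.6f at x = %s\n", func, x.tostring(2).c_str());
}

// ... later, after the usual minbleiccreate()/minbleicsetcond() calls:
    minbleicsetxrep(state, true);
    alglib::minbleicoptimize(state, function1_grad, print_progress, NULL);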
    - +
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -25923,13 +27470,6 @@
     #include "optimization.h"
     
     using namespace alglib;
    -void function1_func(const real_1d_array &x, double &func, void *ptr)
    -{
    -    //
    -    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
    -    //
    -    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    -}
     void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
     {
         //
    @@ -25940,56 +27480,92 @@
         grad[0] = 400*pow(x[0]+3,3);
         grad[1] = 4*pow(x[1]-3,3);
     }
    -void function1_hess(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr)
    -{
    -    //
    -    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
    -    // its derivatives df/d0 and df/dx1
    -    // and its Hessian.
    -    //
    -    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    -    grad[0] = 400*pow(x[0]+3,3);
    -    grad[1] = 4*pow(x[1]-3,3);
    -    hess[0][0] = 1200*pow(x[0]+3,2);
    -    hess[0][1] = 0;
    -    hess[1][0] = 0;
    -    hess[1][1] = 12*pow(x[1]-3,2);
    -}
     
     int main(int argc, char **argv)
     {
         //
    -    // This example demonstrates minimization of F(x0,x1) = 100*(x0+3)^4+(x1-3)^4
    -    // using "FGH" mode of the Levenberg-Marquardt optimizer.
    +    // This example demonstrates minimization of
         //
    -    // F is treated like a monolitic function without internal structure,
    -    // i.e. we do NOT represent it as a sum of squares.
    +    //     f(x,y) = 100*(x+3)^4+(y-3)^4
         //
    -    // Optimization algorithm uses:
    -    // * function value F(x0,x1)
    -    // * gradient G={dF/dxi}
    -    // * Hessian H={d2F/(dxi*dxj)}
    +    // subject to box constraints
    +    //
    +    //     -1<=x<=+1, -1<=y<=+1
    +    //
    +    // using BLEIC optimizer with:
    +    // * initial point x=[0,0]
    +    // * unit scale being set for all variables (see minbleicsetscale for more info)
    +    // * stopping criteria set to "terminate after short enough step"
    +    // * OptGuard integrity check being used to check problem statement
    +    //   for some common errors like nonsmoothness or bad analytic gradient
    +    //
    +    // First, we create optimizer object and tune its properties:
    +    // * set box constraints
    +    // * set variable scales
    +    // * set stopping criteria
         //
         real_1d_array x = "[0,0]";
    -    double epsg = 0.0000000001;
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[-1,-1]";
    +    real_1d_array bndu = "[+1,+1]";
    +    double epsg = 0;
         double epsf = 0;
    -    double epsx = 0;
    +    double epsx = 0.000001;
         ae_int_t maxits = 0;
    -    minlmstate state;
    -    minlmreport rep;
    +    minbleicstate state;
    +    minbleiccreate(x, state);
    +    minbleicsetbc(state, bndl, bndu);
    +    minbleicsetscale(state, s);
    +    minbleicsetcond(state, epsg, epsf, epsx, maxits);
     
    -    minlmcreatefgh(x, state);
    -    minlmsetcond(state, epsg, epsf, epsx, maxits);
    -    alglib::minlmoptimize(state, function1_func, function1_grad, function1_hess);
    -    minlmresults(state, x, rep);
    +    //
    +    // Then we activate OptGuard integrity checking.
    +    //
    +    // OptGuard monitor helps to catch common coding and problem statement
    +    // issues, like:
    +    // * discontinuity of the target function (C0 continuity violation)
    +    // * nonsmoothness of the target function (C1 continuity violation)
    +    // * erroneous analytic gradient, i.e. one inconsistent with actual
    +    //   change in the target/constraints
    +    //
    +    // OptGuard is essential for early prototyping stages because such
    +    // problems often result in premature termination of the optimizer
    +    // which is really hard to distinguish from the correct termination.
    +    //
    +    // IMPORTANT: GRADIENT VERIFICATION IS PERFORMED BY MEANS OF NUMERICAL
    +    //            DIFFERENTIATION. DO NOT USE IT IN PRODUCTION CODE!!!!!!!
    +    //
    +    //            Other OptGuard checks add moderate overhead, but anyway
    +    //            it is better to turn them off when they are not needed.
    +    //
    +    minbleicoptguardsmoothness(state);
    +    minbleicoptguardgradient(state, 0.001);
     
    +    //
    +    // Optimize and evaluate results
    +    //
    +    minbleicreport rep;
    +    alglib::minbleicoptimize(state, function1_grad);
    +    minbleicresults(state, x, rep);
         printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,+3]
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-1,1]
    +
    +    //
    +    // Check that OptGuard did not report errors
    +    //
    +    // NOTE: want to test OptGuard? Try breaking the gradient - say, add
    +    //       1.0 to some of its components.
    +    //
    +    optguardreport ogrep;
    +    minbleicoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
         return 0;
     }
     
     
    -
    +
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -25998,126 +27574,104 @@
     #include "optimization.h"
     
     using namespace alglib;
    -void  function1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    -{
    -    //
    -    // this callback calculates
    -    // f0(x0,x1) = 100*(x0+3)^4,
    -    // f1(x0,x1) = (x1-3)^4
    -    //
    -    fi[0] = 10*pow(x[0]+3,2);
    -    fi[1] = pow(x[1]-3,2);
    -}
    -void  function2_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    +void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
     {
         //
    -    // this callback calculates
    -    // f0(x0,x1) = x0^2+1
    -    // f1(x0,x1) = x1-1
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
    +    // and its derivatives df/d0 and df/dx1
         //
    -    fi[0] = x[0]*x[0]+1;
    -    fi[1] = x[1]-1;
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    +    grad[0] = 400*pow(x[0]+3,3);
    +    grad[1] = 4*pow(x[1]-3,3);
     }
     
     int main(int argc, char **argv)
     {
         //
    -    // This example demonstrates minimization of F(x0,x1) = f0^2+f1^2, where 
    +    // This example demonstrates minimization of
         //
    -    //     f0(x0,x1) = 10*(x0+3)^2
    -    //     f1(x0,x1) = (x1-3)^2
    +    //     f(x,y) = 100*(x+3)^4+(y-3)^4
         //
    -    // using several starting points and efficient restarts.
    +    // subject to inequality constraints
         //
    -    real_1d_array x;
    -    double epsg = 0.0000000001;
    -    double epsf = 0;
    -    double epsx = 0;
    -    ae_int_t maxits = 0;
    -    minlmstate state;
    -    minlmreport rep;
    -
    +    // * x>=2 (posed as general linear constraint),
    +    // * x+y>=6
         //
    -    // create optimizer using minlmcreatev()
    +    // using BLEIC optimizer with
    +    // * initial point x=[0,0]
    +    // * unit scale being set for all variables (see minbleicsetscale for more info)
    +    // * stopping criteria set to "terminate after short enough step"
    +    // * OptGuard integrity check being used to check problem statement
    +    //   for some common errors like nonsmoothness or bad analytic gradient
    +    //
    +    // First, we create optimizer object and tune its properties:
    +    // * set linear constraints
    +    // * set variable scales
    +    // * set stopping criteria
         //
    -    x = "[10,10]";
    -    minlmcreatev(2, x, 0.0001, state);
    -    minlmsetcond(state, epsg, epsf, epsx, maxits);
    -    alglib::minlmoptimize(state, function1_fvec);
    -    minlmresults(state, x, rep);
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,+3]
    +    real_1d_array x = "[5,5]";
    +    real_1d_array s = "[1,1]";
    +    real_2d_array c = "[[1,0,2],[1,1,6]]";
    +    integer_1d_array ct = "[1,1]";
    +    minbleicstate state;
    +    double epsg = 0;
    +    double epsf = 0;
    +    double epsx = 0.000001;
    +    ae_int_t maxits = 0;
    +
    +    minbleiccreate(x, state);
    +    minbleicsetlc(state, c, ct);
    +    minbleicsetscale(state, s);
    +    minbleicsetcond(state, epsg, epsf, epsx, maxits);
     
         //
    -    // restart optimizer using minlmrestartfrom()
    +    // Then we activate OptGuard integrity checking.
         //
    -    // we can use different starting point, different function,
    -    // different stopping conditions, but problem size
    -    // must remain unchanged.
    +    // OptGuard monitor helps to catch common coding and problem statement
    +    // issues, like:
    +    // * discontinuity of the target function (C0 continuity violation)
    +    // * nonsmoothness of the target function (C1 continuity violation)
    +    // * erroneous analytic gradient, i.e. one inconsistent with actual
    +    //   change in the target/constraints
         //
    -    x = "[4,4]";
    -    minlmrestartfrom(state, x);
    -    alglib::minlmoptimize(state, function2_fvec);
    -    minlmresults(state, x, rep);
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [0,1]
    -    return 0;
    -}
    -
    -
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    -
    -using namespace alglib;
    -void  function1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    -{
    +    // OptGuard is essential for early prototyping stages because such
    +    // problems often result in premature termination of the optimizer
    +    // which is really hard to distinguish from the correct termination.
         //
    -    // this callback calculates
    -    // f0(x0,x1) = 100*(x0+3)^4,
    -    // f1(x0,x1) = (x1-3)^4
    +    // IMPORTANT: GRADIENT VERIFICATION IS PERFORMED BY MEANS OF NUMERICAL
    +    //            DIFFERENTIATION. DO NOT USE IT IN PRODUCTION CODE!!!!!!!
         //
    -    fi[0] = 10*pow(x[0]+3,2);
    -    fi[1] = pow(x[1]-3,2);
    -}
    -
    -int main(int argc, char **argv)
    -{
    +    //            Other OptGuard checks add moderate overhead, but anyway
    +    //            it is better to turn them off when they are not needed.
         //
    -    // This example demonstrates minimization of F(x0,x1) = f0^2+f1^2, where 
    +    minbleicoptguardsmoothness(state);
    +    minbleicoptguardgradient(state, 0.001);
    +
         //
    -    //     f0(x0,x1) = 10*(x0+3)^2
    -    //     f1(x0,x1) = (x1-3)^2
    +    // Optimize and evaluate results
         //
    -    // using "V" mode of the Levenberg-Marquardt optimizer.
    +    minbleicreport rep;
    +    alglib::minbleicoptimize(state, function1_grad);
    +    minbleicresults(state, x, rep);
    +    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [2,4]
    +
         //
    -    // Optimization algorithm uses:
    -    // * function vector f[] = {f1,f2}
    +    // Check that OptGuard did not report errors
         //
    -    // No other information (Jacobian, gradient, etc.) is needed.
    +    // NOTE: want to test OptGuard? Try breaking the gradient - say, add
    +    //       1.0 to some of its components.
         //
    -    real_1d_array x = "[0,0]";
    -    double epsg = 0.0000000001;
    -    double epsf = 0;
    -    double epsx = 0;
    -    ae_int_t maxits = 0;
    -    minlmstate state;
    -    minlmreport rep;
    -
    -    minlmcreatev(2, x, 0.0001, state);
    -    minlmsetcond(state, epsg, epsf, epsx, maxits);
    -    alglib::minlmoptimize(state, function1_fvec);
    -    minlmresults(state, x, rep);
    -
    -    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,+3]
    +    optguardreport ogrep;
    +    minbleicoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
         return 0;
     }
     
     
    -
    +
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -26126,157 +27680,125 @@
     #include "optimization.h"
     
     using namespace alglib;
    -void  function1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    +void function1_func(const real_1d_array &x, double &func, void *ptr)
     {
         //
    -    // this callback calculates
    -    // f0(x0,x1) = 100*(x0+3)^4,
    -    // f1(x0,x1) = (x1-3)^4
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
         //
    -    fi[0] = 10*pow(x[0]+3,2);
    -    fi[1] = pow(x[1]-3,2);
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
     }
     
     int main(int argc, char **argv)
     {
         //
    -    // This example demonstrates minimization of F(x0,x1) = f0^2+f1^2, where 
    -    //
    -    //     f0(x0,x1) = 10*(x0+3)^2
    -    //     f1(x0,x1) = (x1-3)^2
    -    //
    -    // with boundary constraints
    +    // This example demonstrates minimization of
         //
    -    //     -1 <= x0 <= +1
    -    //     -1 <= x1 <= +1
    +    //     f(x,y) = 100*(x+3)^4+(y-3)^4
         //
    -    // using "V" mode of the Levenberg-Marquardt optimizer.
    +    // subject to box constraints
         //
    -    // Optimization algorithm uses:
    -    // * function vector f[] = {f1,f2}
    +    //     -1<=x<=+1, -1<=y<=+1
         //
    -    // No other information (Jacobian, gradient, etc.) is needed.
    +    // using BLEIC optimizer with:
    +    // * numerical differentiation being used
    +    // * initial point x=[0,0]
    +    // * unit scale being set for all variables (see minbleicsetscale for more info)
    +    // * stopping criteria set to "terminate after short enough step"
    +    // * OptGuard integrity check being used to check problem statement
    +    //   for some common errors like nonsmoothness or bad analytic gradient
    +    //
    +    // First, we create optimizer object and tune its properties:
    +    // * set box constraints
    +    // * set variable scales
    +    // * set stopping criteria
         //
         real_1d_array x = "[0,0]";
    +    real_1d_array s = "[1,1]";
         real_1d_array bndl = "[-1,-1]";
         real_1d_array bndu = "[+1,+1]";
    -    double epsg = 0.0000000001;
    +    minbleicstate state;
    +    double epsg = 0;
         double epsf = 0;
    -    double epsx = 0;
    +    double epsx = 0.000001;
         ae_int_t maxits = 0;
    -    minlmstate state;
    -    minlmreport rep;
    -
    -    minlmcreatev(2, x, 0.0001, state);
    -    minlmsetbc(state, bndl, bndu);
    -    minlmsetcond(state, epsg, epsf, epsx, maxits);
    -    alglib::minlmoptimize(state, function1_fvec);
    -    minlmresults(state, x, rep);
    -
    -    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-1,+1]
    -    return 0;
    -}
    -
    +    double diffstep = 1.0e-6;
     
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +    minbleiccreatef(x, diffstep, state);
    +    minbleicsetbc(state, bndl, bndu);
    +    minbleicsetscale(state, s);
    +    minbleicsetcond(state, epsg, epsf, epsx, maxits);
     
    -using namespace alglib;
    -void  function1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    -{
         //
    -    // this callback calculates
    -    // f0(x0,x1) = 100*(x0+3)^4,
    -    // f1(x0,x1) = (x1-3)^4
    +    // Then we activate OptGuard integrity checking.
         //
    -    fi[0] = 10*pow(x[0]+3,2);
    -    fi[1] = pow(x[1]-3,2);
    -}
    -void  function1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    -{
    +    // Numerical differentiation always produces "correct" gradient
    +    // (with some truncation error, but unbiased). Thus, we just have
    +    // to check smoothness properties of the target: C0 and C1 continuity.
         //
    -    // this callback calculates
    -    // f0(x0,x1) = 100*(x0+3)^4,
    -    // f1(x0,x1) = (x1-3)^4
    -    // and Jacobian matrix J = [dfi/dxj]
    +    // Sometimes user accidentally tries to solve nonsmooth problems
    +    // with smooth optimizer. OptGuard helps to detect such situations
    +    // early, at the prototyping stage.
         //
    -    fi[0] = 10*pow(x[0]+3,2);
    -    fi[1] = pow(x[1]-3,2);
    -    jac[0][0] = 20*(x[0]+3);
    -    jac[0][1] = 0;
    -    jac[1][0] = 0;
    -    jac[1][1] = 2*(x[1]-3);
    -}
    +    minbleicoptguardsmoothness(state);
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates minimization of F(x0,x1) = f0^2+f1^2, where 
         //
    -    //     f0(x0,x1) = 10*(x0+3)^2
    -    //     f1(x0,x1) = (x1-3)^2
    +    // Optimize and evaluate results
         //
    -    // using "VJ" mode of the Levenberg-Marquardt optimizer.
    +    minbleicreport rep;
    +    alglib::minbleicoptimize(state, function1_func);
    +    minbleicresults(state, x, rep);
    +    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-1,1]
    +
         //
    -    // Optimization algorithm uses:
    -    // * function vector f[] = {f1,f2}
    -    // * Jacobian matrix J = {dfi/dxj}.
    +    // Check that OptGuard did not report errors
         //
    -    real_1d_array x = "[0,0]";
    -    double epsg = 0.0000000001;
    -    double epsf = 0;
    -    double epsx = 0;
    -    ae_int_t maxits = 0;
    -    minlmstate state;
    -    minlmreport rep;
    -
    -    minlmcreatevj(2, x, state);
    -    minlmsetcond(state, epsg, epsf, epsx, maxits);
    -    alglib::minlmoptimize(state, function1_fvec, function1_jac);
    -    minlmresults(state, x, rep);
    -
    -    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,+3]
    +    // Want to challenge OptGuard? Try to make your problem
    +    // nonsmooth by replacing 100*(x+3)^4 by 100*|x+3| and
    +    // re-run optimizer.
    +    //
    +    optguardreport ogrep;
    +    minbleicoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
         return 0;
     }
     
     
    -
    + - +
     
    /************************************************************************* This structure stores optimization report: @@ -26289,8 +27811,6 @@ TerminationType field contains completion code, which can be: -8 internal integrity control detected infinite or NAN values in function/gradient. Abnormal termination signalled. - -7 gradient verification failed. - See MinNLCSetGradientCheck() for more information. 1 relative function improvement is no more than EpsF. 2 relative step is no more than EpsX. 4 gradient norm is no more than EpsG @@ -26298,577 +27818,682 @@ 7 stopping conditions are too stringent, further improvement is impossible, X contains best point found so far. + 8 terminated by user who called mincgrequesttermination(). X contains + point which was "current accepted" when termination request was + submitted. Other fields of this structure are not documented and should not be used! *************************************************************************/ -
    class minnlcreport +
    class mincgreport { ae_int_t iterationscount; ae_int_t nfev; - ae_int_t varidx; - ae_int_t funcidx; ae_int_t terminationtype; - ae_int_t dbgphase0its; };
    - +
     
    /************************************************************************* -This object stores nonlinear optimizer state. -You should use functions provided by MinNLC subpackage to work with this -object +This object stores state of the nonlinear CG optimizer. + +You should use ALGLIB functions to work with this object. *************************************************************************/ -
    class minnlcstate +
    class mincgstate { };
    - +
     
    /************************************************************************* - NONLINEARLY CONSTRAINED OPTIMIZATION - WITH PRECONDITIONED AUGMENTED LAGRANGIAN ALGORITHM + NONLINEAR CONJUGATE GRADIENT METHOD DESCRIPTION: -The subroutine minimizes function F(x) of N arguments subject to any -combination of: -* bound constraints -* linear inequality constraints -* linear equality constraints -* nonlinear equality constraints Gi(x)=0 -* nonlinear inequality constraints Hi(x)<=0 - -REQUIREMENTS: -* user must provide function value and gradient for F(), H(), G() -* starting point X0 must be feasible or not too far away from the feasible - set -* F(), G(), H() are twice continuously differentiable on the feasible set - and its neighborhood -* nonlinear constraints G() and H() must have non-zero gradient at G(x)=0 - and at H(x)=0. Say, constraint like x^2>=1 is supported, but x^2>=0 is - NOT supported. - -USAGE: - -Constrained optimization if far more complex than the unconstrained one. -Nonlinearly constrained optimization is one of the most esoteric numerical -procedures. - -Here we give very brief outline of the MinNLC optimizer. We strongly -recommend you to study examples in the ALGLIB Reference Manual and to read -ALGLIB User Guide on optimization, which is available at -http://www.alglib.net/optimization/ - -1. User initializes algorithm state with MinNLCCreate() call and chooses - what NLC solver to use. There is some solver which is used by default, - with default settings, but you should NOT rely on default choice. It - may change in future releases of ALGLIB without notice, and no one can - guarantee that new solver will be able to solve your problem with - default settings. - - From the other side, if you choose solver explicitly, you can be pretty - sure that it will work with new ALGLIB releases. - - In the current release following solvers can be used: - * AUL solver (activated with MinNLCSetAlgoAUL() function) - -2. User adds boundary and/or linear and/or nonlinear constraints by means - of calling one of the following functions: - a) MinNLCSetBC() for boundary constraints - b) MinNLCSetLC() for linear constraints - c) MinNLCSetNLC() for nonlinear constraints - You may combine (a), (b) and (c) in one optimization problem. - -3. User sets scale of the variables with MinNLCSetScale() function. It is - VERY important to set scale of the variables, because nonlinearly - constrained problems are hard to solve when variables are badly scaled. +The subroutine minimizes function F(x) of N arguments by using one of the +nonlinear conjugate gradient methods. -4. User sets stopping conditions with MinNLCSetCond(). If NLC solver - uses inner/outer iteration layout, this function sets stopping - conditions for INNER iterations. +These CG methods are globally convergent (even on non-convex functions) as +long as grad(f) is Lipschitz continuous in a some neighborhood of the +L = { x : f(x)<=f(x0) }. -5. User chooses one of the preconditioning methods. Preconditioning is - very important for efficient handling of boundary/linear/nonlinear - constraints. Without preconditioning algorithm would require thousands - of iterations even for simple problems. 
Two preconditioners can be - used: - * approximate LBFGS-based preconditioner which should be used for - problems with almost orthogonal constraints (activated by calling - MinNLCSetPrecInexact) - * exact low-rank preconditiner (activated by MinNLCSetPrecExactLowRank) - which should be used for problems with moderate number of constraints - which do not have to be orthogonal. -6. Finally, user calls MinNLCOptimize() function which takes algorithm - state and pointer (delegate, etc.) to callback function which calculates - F/G/H. +REQUIREMENTS: +Algorithm will request following information during its operation: +* function value F and its gradient G (simultaneously) at given point X -7. User calls MinNLCResults() to get solution -8. Optionally user may call MinNLCRestartFrom() to solve another problem - with same N but another starting point. MinNLCRestartFrom() allows to - reuse already initialized structure. +USAGE: +1. User initializes algorithm state with MinCGCreate() call +2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and + other functions +3. User calls MinCGOptimize() function which takes algorithm state and + pointer (delegate, etc.) to callback function which calculates F/G. +4. User calls MinCGResults() to get solution +5. Optionally, user may call MinCGRestartFrom() to solve another problem + with same N but another starting point and/or another function. + MinCGRestartFrom() allows to reuse already initialized structure. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used - * if not given, automatically determined from size ofX - X - starting point, array[N]: - * it is better to set X to a feasible point - * but X can be infeasible, in which case algorithm will try - to find feasible point first, using X as initial - approximation. + * if not given, automatically determined from size of X + X - starting point, array[0..N-1]. OUTPUT PARAMETERS: - State - structure stores algorithm state + State - structure which stores algorithm state -- ALGLIB -- - Copyright 06.06.2014 by Bochkanov Sergey + Copyright 25.03.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlccreate(real_1d_array x, minnlcstate& state); -void alglib::minnlccreate( +
    void alglib::mincgcreate( + real_1d_array x, + mincgstate& state, + const xparams _params = alglib::xdefault); +void alglib::mincgcreate( ae_int_t n, real_1d_array x, - minnlcstate& state); + mincgstate& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - +

    Examples:   [1]  [2]  
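Because the CG examples are only linked above, here is a compact sketch of USAGE steps 1-4; it reuses the function1_grad callback from the examples in this section and assumes the MinCGSetCond()/MinCGResults() calls referenced in that list:

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "optimization.h"

using namespace alglib;
void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    //
    // f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4 and its gradient
    //
    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    grad[0] = 400*pow(x[0]+3,3);
    grad[1] = 4*pow(x[1]-3,3);
}

int main(int argc, char **argv)
{
    //
    // unconstrained minimization with the nonlinear CG optimizer:
    // create, set stopping conditions, run, read results
    //
    real_1d_array x = "[0,0]";
    double epsg = 0;
    double epsf = 0;
    double epsx = 0.000001;
    ae_int_t maxits = 0;
    mincgstate state;
    mincgreport rep;

    mincgcreate(x, state);
    mincgsetcond(state, epsg, epsf, epsx, maxits);
    alglib::mincgoptimize(state, function1_grad);
    mincgresults(state, x, rep);

    printf("%d\n", int(rep.terminationtype));
    printf("%s\n", x.tostring(2).c_str()); // should end up close to [-3,+3]
    return 0;
}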

    +
     
    /************************************************************************* -This subroutine is a finite difference variant of MinNLCCreate(). It uses +The subroutine is finite difference variant of MinCGCreate(). It uses finite differences in order to differentiate target function. -Description below contains information which is specific to this function -only. We recommend to read comments on MinNLCCreate() in order to get more -information about creation of NLC optimizer. +Description below contains information which is specific to this function +only. We recommend to read comments on MinCGCreate() in order to get more +information about creation of CG optimizer. INPUT PARAMETERS: N - problem dimension, N>0: * if given, only leading N elements of X are used - * if not given, automatically determined from size ofX - X - starting point, array[N]: - * it is better to set X to a feasible point - * but X can be infeasible, in which case algorithm will try - to find feasible point first, using X as initial - approximation. + * if not given, automatically determined from size of X + X - starting point, array[0..N-1]. DiffStep- differentiation step, >0 OUTPUT PARAMETERS: - State - structure stores algorithm state + State - structure which stores algorithm state NOTES: 1. algorithm uses 4-point central formula for differentiation. 2. differentiation step along I-th axis is equal to DiffStep*S[I] where - S[] is scaling vector which can be set by MinNLCSetScale() call. + S[] is scaling vector which can be set by MinCGSetScale() call. 3. we recommend you to use moderate values of differentiation step. Too - large step will result in too large TRUNCATION errors, while too small - step will result in too large NUMERICAL errors. 1.0E-4 can be good - value to start from. + large step will result in too large truncation errors, while too small + step will result in too large numerical errors. 1.0E-6 can be good + value to start with. 4. Numerical differentiation is very inefficient - one gradient calculation needs 4*N function evaluations. This function will work for any N - either small (1...10), moderate (10...100) or large (100...). However, performance penalty will be too severe for any N's except for small ones. We should also say that code which relies on numerical differentiation - is less robust and precise. Imprecise gradient may slow down - convergence, especially on highly nonlinear problems. + is less robust and precise. L-BFGS needs exact gradient values. + Imprecise gradient may slow down convergence, especially on highly + nonlinear problems. Thus we recommend to use this function for fast prototyping on small- dimensional problems only, and to implement analytical gradient as soon as possible. -- ALGLIB -- - Copyright 06.06.2014 by Bochkanov Sergey + Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlccreatef( +
    void alglib::mincgcreatef( real_1d_array x, double diffstep, - minnlcstate& state); -void alglib::minnlccreatef( + mincgstate& state, + const xparams _params = alglib::xdefault); +void alglib::mincgcreatef( ae_int_t n, real_1d_array x, double diffstep, - minnlcstate& state); + mincgstate& state, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
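A shorter sketch of the finite difference variant; function1_func is the function-value-only callback defined in the examples of this section, and mincgsetcond()/mincgresults() are assumed as in the previous sketch:

    real_1d_array x = "[0,0]";
    double diffstep = 1.0e-6;    // moderate differentiation step, as recommended above
    mincgstate state;
    mincgreport rep;

    mincgcreatef(x, diffstep, state);              // optimizer driven by function values only
    mincgsetcond(state, 0, 0, 0.000001, 0);
    alglib::mincgoptimize(state, function1_func);  // callback computes func only, no gradient
    mincgresults(state, x, rep);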

    +
     
    /************************************************************************* -This family of functions is used to launcn iterations of nonlinear optimizer +This function activates/deactivates verification of the user-supplied +analytic gradient. -These functions accept following parameters: - state - algorithm state - fvec - callback which calculates function vector fi[] - at given point x - jac - callback which calculates function vector fi[] - and Jacobian jac at given point x - rep - optional callback which is called after each iteration - can be NULL - ptr - optional pointer which is passed to func/grad/hess/jac/rep - can be NULL +Upon activation of this option OptGuard integrity checker performs +numerical differentiation of your target function at the initial point +(note: future versions may also perform check at the final point) and +compares numerical gradient with analytic one provided by you. +If difference is too large, an error flag is set and optimization session +continues. After optimization session is over, you can retrieve the report +which stores both gradients and specific components highlighted as +suspicious by the OptGuard. -NOTES: +The primary OptGuard report can be retrieved with mincgoptguardresults(). -1. This function has two different implementations: one which uses exact - (analytical) user-supplied Jacobian, and one which uses only function - vector and numerically differentiates function in order to obtain - gradient. +IMPORTANT: gradient check is a high-overhead option which will cost you + about 3*N additional function evaluations. In many cases it may + cost as much as the rest of the optimization session. - Depending on the specific function used to create optimizer object - you should choose appropriate variant of MinNLCOptimize() - one which - accepts function AND Jacobian or one which accepts ONLY function. + YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO + CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. - Be careful to choose variant of MinNLCOptimize() which corresponds to - your optimization scheme! Table below lists different combinations of - callback (function/gradient) passed to MinNLCOptimize() and specific - function used to create optimizer. +NOTE: unlike previous incarnation of the gradient checking code, OptGuard + does NOT interrupt optimization even if it discovers bad gradient. +INPUT PARAMETERS: + State - structure used to store algorithm state + TestStep - verification step used for numerical differentiation: + * TestStep=0 turns verification off + * TestStep>0 activates verification + You should carefully choose TestStep. Value which is + too large (so large that function behavior is non- + cubic at this scale) will lead to false alarms. Too + short step will result in rounding errors dominating + numerical derivative. - | USER PASSED TO MinNLCOptimize() - CREATED WITH | function only | function and gradient - ------------------------------------------------------------ - MinNLCCreateF() | works FAILS - MinNLCCreate() | FAILS works + You may use different step for different parameters by + means of setting scale with mincgsetscale(). - Here "FAILS" denotes inappropriate combinations of optimizer creation - function and MinNLCOptimize() version. Attemps to use such - combination will lead to exception. Either you did not pass gradient - when it WAS needed or you passed gradient when it was NOT needed. 
+=== EXPLANATION ========================================================== + +In order to verify gradient algorithm performs following steps: + * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], + where X[i] is i-th component of the initial point and S[i] is a scale + of i-th parameter + * F(X) is evaluated at these trial points + * we perform one more evaluation in the middle point of the interval + * we build cubic model using function values and derivatives at trial + points and we compare its prediction with actual value in the middle + point -- ALGLIB -- - Copyright 06.06.2014 by Bochkanov Sergey + Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ -
    void minnlcoptimize(minnlcstate &state, - void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), - void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); -void minnlcoptimize(minnlcstate &state, - void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), - void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); +
    void alglib::mincgoptguardgradient( + mincgstate state, + double teststep, + const xparams _params = alglib::xdefault); +
    -

    Examples:   [1]  [2]  [3]  

    - +

    Examples:   [1]  
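A minimal sketch of switching the check on; TestStep=0.001 assumes unit-scaled variables (otherwise set scales with mincgsetscale() first, as the description suggests):

    mincgoptguardgradient(state, 0.001);    // TestStep = 0.001; TestStep = 0 turns the check off
    alglib::mincgoptimize(state, function1_grad);
    optguardreport ogrep;
    mincgoptguardresults(state, ogrep);     // ogrep.badgradsuspected is set if the gradient looks wrong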

    +
     
    /************************************************************************* -This subroutine restarts algorithm from new point. -All optimization parameters (including constraints) are left unchanged. +Detailed results of the OptGuard integrity check for nonsmoothness test #0 + +Nonsmoothness (non-C1) test #0 studies function values (not gradient!) +obtained during line searches and monitors behavior of the directional +derivative estimate. + +This test is less powerful than test #1, but it does not depend on the +gradient values and thus it is more robust against artifacts introduced by +numerical differentiation. + +Two reports are returned: +* a "strongest" one, corresponding to line search which had highest + value of the nonsmoothness indicator +* a "longest" one, corresponding to line search which had more function + evaluations, and thus is more detailed + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], f[] - arrays of length CNT which store step lengths and function + values at these points; f[i] is evaluated in x0+stp[i]*d. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +========================================================================== += SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will += see where C1 continuity is violated. +========================================================================== -This function allows to solve multiple optimization problems (which -must have same number of dimensions) without object reallocation penalty. +INPUT PARAMETERS: + state - algorithm state + +OUTPUT PARAMETERS: + strrep - C1 test #0 "strong" report + lngrep - C1 test #0 "long" report + + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mincgoptguardnonc1test0results( + mincgstate state, + optguardnonc1test0report& strrep, + optguardnonc1test0report& lngrep, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Detailed results of the OptGuard integrity check for nonsmoothness test #1 + +Nonsmoothness (non-C1) test #1 studies individual components of the +gradient computed during line search. + +When precise analytic gradient is provided this test is more powerful than +test #0 which works with function values and ignores user-provided +gradient. However, test #0 becomes more powerful when numerical +differentiation is employed (in such cases test #1 detects higher levels +of numerical noise and becomes too conservative). + +This test also tells specific components of the gradient which violate C1 +continuity, which makes it more informative than #0, which just tells that +continuity is violated. + +Two reports are returned: +* a "strongest" one, corresponding to line search which had highest + value of the nonsmoothness indicator +* a "longest" one, corresponding to line search which had more function + evaluations, and thus is more detailed + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* vidx - is an index of the variable in [0,N) with nonsmooth derivative +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], g[] - arrays of length CNT which store step lengths and gradient + values at these points; g[i] is evaluated in x0+stp[i]*d and contains + vidx-th component of the gradient. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +========================================================================== += SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will += see where C1 continuity is violated. +========================================================================== INPUT PARAMETERS: - State - structure previously allocated with MinNLCCreate call. - X - new starting point. + state - algorithm state + +OUTPUT PARAMETERS: + strrep - C1 test #1 "strong" report + lngrep - C1 test #1 "long" report -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcrestartfrom(minnlcstate state, real_1d_array x); +
    void alglib::mincgoptguardnonc1test1results( + mincgstate state, + optguardnonc1test1report& strrep, + optguardnonc1test1report& lngrep, + const xparams _params = alglib::xdefault); + +
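A sketch of querying both detailed nonsmoothness reports after the optimization session; the field names follow the descriptions above:

    optguardnonc1test0report strrep0, lngrep0;
    optguardnonc1test1report strrep1, lngrep1;
    mincgoptguardnonc1test0results(state, strrep0, lngrep0);
    mincgoptguardnonc1test1results(state, strrep1, lngrep1);
    if( strrep0.positive )
        printf("C1 test #0: suspicious behavior between steps %d and %d\n",
               int(strrep0.stpidxa), int(strrep0.stpidxb));
    if( strrep1.positive )
        printf("C1 test #1: derivative of variable %d looks nonsmooth\n",
               int(strrep1.vidx));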
    + +
    +
    /************************************************************************* +Results of OptGuard integrity check, should be called after optimization +session is over. + +=== PRIMARY REPORT ======================================================= + +OptGuard performs several checks which are intended to catch common errors +in the implementation of nonlinear function/gradient: +* incorrect analytic gradient +* discontinuous (non-C0) target functions (constraints) +* nonsmooth (non-C1) target functions (constraints) + +Each of these checks is activated with appropriate function: +* mincgoptguardgradient() for gradient verification +* mincgoptguardsmoothness() for C0/C1 checks + +Following flags are set when these errors are suspected: +* rep.badgradsuspected, and additionally: + * rep.badgradvidx for specific variable (gradient element) suspected + * rep.badgradxbase, a point where gradient is tested + * rep.badgraduser, user-provided gradient (stored as 2D matrix with + single row in order to make report structure compatible with more + complex optimizers like MinNLC or MinLM) + * rep.badgradnum, reference gradient obtained via numerical + differentiation (stored as 2D matrix with single row in order to make + report structure compatible with more complex optimizers like MinNLC + or MinLM) +* rep.nonc0suspected +* rep.nonc1suspected + +=== ADDITIONAL REPORTS/LOGS ============================================== + +Several different tests are performed to catch C0/C1 errors, you can find +out specific test signaled error by looking to: +* rep.nonc0test0positive, for non-C0 test #0 +* rep.nonc1test0positive, for non-C1 test #0 +* rep.nonc1test1positive, for non-C1 test #1 + +Additional information (including line search logs) can be obtained by +means of: +* mincgoptguardnonc1test0results() +* mincgoptguardnonc1test1results() +which return detailed error reports, specific points where discontinuities +were found, and so on. -
    - -
    -
    /************************************************************************* -MinNLC results +========================================================================== INPUT PARAMETERS: - State - algorithm state + state - algorithm state OUTPUT PARAMETERS: - X - array[0..N-1], solution - Rep - optimization report. You should check Rep.TerminationType - in order to distinguish successful termination from - unsuccessful one: - * -8 internal integrity control detected infinite or - NAN values in function/gradient. Abnormal - termination signalled. - * -7 gradient verification failed. - See MinNLCSetGradientCheck() for more information. - * 1 relative function improvement is no more than EpsF. - * 2 scaled step is no more than EpsX. - * 4 scaled gradient norm is no more than EpsG. - * 5 MaxIts steps was taken - More information about fields of this structure can be - found in the comments on MinNLCReport datatype. + rep - generic OptGuard report; more detailed reports can be + retrieved with other functions. + +NOTE: false negatives (nonsmooth problems are not identified as nonsmooth + ones) are possible although unlikely. + + The reason is that you need to make several evaluations around + nonsmoothness in order to accumulate enough information about + function curvature. Say, if you start right from the nonsmooth point, + optimizer simply won't get enough data to understand what is going + wrong before it terminates due to abrupt changes in the derivative. + It is also possible that "unlucky" step will move us to the + termination too quickly. + + Our current approach is to have less than 0.1% false negatives in + our test examples (measured with multiple restarts from random + points), and to have exactly 0% false positives. -- ALGLIB -- - Copyright 06.06.2014 by Bochkanov Sergey + Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcresults( - minnlcstate state, - real_1d_array& x, - minnlcreport& rep); +
    void alglib::mincgoptguardresults( + mincgstate state, + optguardreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - +

    Examples:   [1]  
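The BLEIC example programs earlier in this section already show the typical pattern; a condensed sketch for the CG optimizer:

    optguardreport ogrep;
    mincgoptguardresults(state, ogrep);
    printf("%s\n", ogrep.badgradsuspected ? "true" : "false");
    printf("%s\n", ogrep.nonc0suspected ? "true" : "false");
    printf("%s\n", ogrep.nonc1suspected ? "true" : "false");
    if( ogrep.badgradsuspected )
        printf("suspicious gradient element: %d\n", int(ogrep.badgradvidx));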

    +
     
    /************************************************************************* -NLC results +This function activates/deactivates nonsmoothness monitoring option of +the OptGuard integrity checker. Smoothness monitor silently observes +solution process and tries to detect ill-posed problems, i.e. ones with: +a) discontinuous target function (non-C0) +b) nonsmooth target function (non-C1) -Buffered implementation of MinNLCResults() which uses pre-allocated buffer -to store X[]. If buffer size is too small, it resizes buffer. It is -intended to be used in the inner cycles of performance critical algorithms -where array reallocation penalty is too large to be ignored. +Smoothness monitoring does NOT interrupt optimization even if it suspects +that your problem is nonsmooth. It just sets corresponding flags in the +OptGuard report which can be retrieved after optimization is over. + +Smoothness monitoring is a moderate overhead option which often adds less +than 1% to the optimizer running time. Thus, you can use it even for large +scale problems. + +NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 + continuity violations. + + First, minor errors are hard to catch - say, a 0.0001 difference in + the model values at two sides of the gap may be due to discontinuity + of the model - or simply because the model has changed. + + Second, C1-violations are especially difficult to detect in a + noninvasive way. The optimizer usually performs very short steps + near the nonsmoothness, and differentiation usually introduces a + lot of numerical noise. It is hard to tell whether some tiny + discontinuity in the slope is due to real nonsmoothness or just due + to numerical noise alone. + + Our top priority was to avoid false positives, so in some rare cases + minor errors may go unnoticed (however, in most cases they can be + spotted with a restart from a different initial point). + +INPUT PARAMETERS: + state - algorithm state + level - monitoring level: + * 0 - monitoring is disabled + * 1 - noninvasive low-overhead monitoring; function values + and/or gradients are recorded, but OptGuard does not + try to perform additional evaluations in order to + get more information about suspicious locations. + +=== EXPLANATION ========================================================== + +One major source of headache during optimization is the possibility of +coding errors in the target function/constraints (or their gradients). +Such errors most often manifest themselves as discontinuity or +nonsmoothness of the target/constraints. + +Another frequent situation is when you try to optimize something involving +lots of min() and max() operations, i.e. nonsmooth target. Although not a +coding error, it is nonsmoothness anyway - and smooth optimizers usually +stop right after encountering nonsmoothness, well before reaching solution. + +OptGuard integrity checker helps you to catch such situations: it monitors +function values/gradients being passed to the optimizer and tries to detect +errors. Upon discovering a suspicious pair of points it raises appropriate +flag (and allows you to continue optimization). When optimization is done, +you can study OptGuard result. -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 21.11.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcresultsbuf( - minnlcstate state, - real_1d_array& x, - minnlcreport& rep); +
    void alglib::mincgoptguardsmoothness( + mincgstate state, + const xparams _params = alglib::xdefault); +void alglib::mincgoptguardsmoothness( + mincgstate state, + ae_int_t level, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
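A minimal sketch of the two documented overloads:

    mincgoptguardsmoothness(state);       // default form: noninvasive level-1 monitoring
    mincgoptguardsmoothness(state, 0);    // explicit level; 0 disables monitoring again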

    +
     
    /************************************************************************* -This function tells MinNLC unit to use Augmented Lagrangian algorithm -for nonlinearly constrained optimization. This algorithm is a slight -modification of one described in "A Modified Barrier-Augmented Lagrangian -Method for Constrained Minimization (1999)" by D.GOLDFARB, R.POLYAK, -K. SCHEINBERG, I.YUZEFOVICH. - -Augmented Lagrangian algorithm works by converting problem of minimizing -F(x) subject to equality/inequality constraints to unconstrained problem -of the form - - min[ f(x) + - + Rho*PENALTY_EQ(x) + SHIFT_EQ(x,Nu1) + - + Rho*PENALTY_INEQ(x) + SHIFT_INEQ(x,Nu2) ] - -where: -* Rho is a fixed penalization coefficient -* PENALTY_EQ(x) is a penalty term, which is used to APPROXIMATELY enforce - equality constraints -* SHIFT_EQ(x) is a special "shift" term which is used to "fine-tune" - equality constraints, greatly increasing precision -* PENALTY_INEQ(x) is a penalty term which is used to approximately enforce - inequality constraints -* SHIFT_INEQ(x) is a special "shift" term which is used to "fine-tune" - inequality constraints, greatly increasing precision -* Nu1/Nu2 are vectors of Lagrange coefficients which are fine-tuned during - outer iterations of algorithm - -This version of AUL algorithm uses preconditioner, which greatly -accelerates convergence. Because this algorithm is similar to penalty -methods, it may perform steps into infeasible area. All kinds of -constraints (boundary, linear and nonlinear ones) may be violated in -intermediate points - and in the solution. However, properly configured -AUL method is significantly better at handling constraints than barrier -and/or penalty methods. - -The very basic outline of algorithm is given below: -1) first outer iteration is performed with "default" values of Lagrange - multipliers Nu1/Nu2. Solution quality is low (candidate point can be - too far away from true solution; large violation of constraints is - possible) and is comparable with that of penalty methods. -2) subsequent outer iterations refine Lagrange multipliers and improve - quality of the solution. - -INPUT PARAMETERS: - State - structure which stores algorithm state - Rho - penalty coefficient, Rho>0: - * large enough that algorithm converges with desired - precision. Minimum value is 10*max(S'*diag(H)*S), where - S is a scale matrix (set by MinNLCSetScale) and H is a - Hessian of the function being minimized. If you can not - easily estimate Hessian norm, see our recommendations - below. - * not TOO large to prevent ill-conditioning - * for unit-scale problems (variables and Hessian have unit - magnitude), Rho=100 or Rho=1000 can be used. - * it is important to note that Rho is internally multiplied - by scaling matrix, i.e. optimum value of Rho depends on - scale of variables specified by MinNLCSetScale(). - ItsCnt - number of outer iterations: - * ItsCnt=0 means that small number of outer iterations is - automatically chosen (10 iterations in current version). - * ItsCnt=1 means that AUL algorithm performs just as usual - barrier method. 
    - * ItsCnt>1 means that AUL algorithm performs specified
-   number of outer iterations
+This family of functions is used to launch iterations of nonlinear optimizer
-HOW TO CHOOSE PARAMETERS
+These functions accept following parameters:
+    state   -   algorithm state
+    func    -   callback which calculates function (or merit function)
+                value func at given point x
+    grad    -   callback which calculates function (or merit function)
+                value func and gradient grad at given point x
+    rep     -   optional callback which is called after each iteration
+                can be NULL
+    ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
+                can be NULL
-Nonlinear optimization is a tricky area and Augmented Lagrangian algorithm
-is sometimes hard to tune. Good values of Rho and ItsCnt are problem-
-specific. In order to help you we prepared following set of
-recommendations:
+NOTES:
-* for unit-scale problems (variables and Hessian have unit magnitude),
-  Rho=100 or Rho=1000 can be used.
+1. This function has two different implementations: one which uses exact
+   (analytical) user-supplied gradient, and one which uses function value
+   only and numerically differentiates function in order to obtain
+   gradient.
-* start from some small value of Rho and solve problem with just one
-  outer iteration (ItcCnt=1). In this case algorithm behaves like penalty
-  method. Increase Rho in 2x or 10x steps until you see that one outer
-  iteration returns point which is "rough approximation to solution".
+   Depending on the specific function used to create optimizer object
+   (either MinCGCreate() for analytical gradient or MinCGCreateF() for
+   numerical differentiation) you should choose appropriate variant of
+   MinCGOptimize() - one which accepts function AND gradient or one which
+   accepts function ONLY.
-  It is very important to have Rho so large that penalty term becomes
-  constraining i.e. modified function becomes highly convex in constrained
-  directions.
+   Be careful to choose variant of MinCGOptimize() which corresponds to
+   your optimization scheme! Table below lists different combinations of
+   callback (function/gradient) passed to MinCGOptimize() and specific
+   function used to create optimizer.
-  From the other side, too large Rho may prevent you from converging to
-  the solution. You can diagnose it by studying number of inner iterations
-  performed by algorithm: too few (5-10 on 1000-dimensional problem) or
-  too many (orders of magnitude more than dimensionality) usually means
-  that Rho is too large.
-* with just one outer iteration you usually have low-quality solution.
-  Some constraints can be violated with very large margin, while other
-  ones (which are NOT violated in the true solution) can push final point
-  too far in the inner area of the feasible set.
+                     |         USER PASSED TO MinCGOptimize()
+   CREATED WITH      |  function only   |  function and gradient
+   ------------------------------------------------------------
+   MinCGCreateF()    |     work                FAIL
+   MinCGCreate()     |     FAIL                work
-  For example, if you have constraint x0>=0 and true solution x0=1, then
-  merely a presence of "x0>=0" will introduce a bias towards larger values
-  of x0. Say, algorithm may stop at x0=1.5 instead of 1.0.
+   Here "FAIL" denotes inappropriate combinations of optimizer creation
+   function and MinCGOptimize() version. Attempts to use such combination
+   (for example, to create optimizer with MinCGCreateF() and to pass
+   gradient information to MinCGOptimize()) will lead to exception being
+   thrown. 
Either you did not pass gradient when it WAS needed or you + passed gradient when it was NOT needed. -* after you found good Rho, you may increase number of outer iterations. - ItsCnt=10 is a good value. Subsequent outer iteration will refine values - of Lagrange multipliers. Constraints which were violated will be - enforced, inactive constraints will be dropped (corresponding multipliers - will be decreased). Ideally, you should see 10-1000x improvement in - constraint handling (constraint violation is reduced). + -- ALGLIB -- + Copyright 20.04.2009 by Bochkanov Sergey +*************************************************************************/ +
    void mincgoptimize(mincgstate &state, + void (*func)(const real_1d_array &x, double &func, void *ptr), + void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault); +void mincgoptimize(mincgstate &state, + void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), + void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault); +
    +

    Examples:   [1]  [2]  [3]  

    + +
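The pairing rules from the table above, shown as a short sketch (hypothetical helpers f_only/f_grad/pairing_demo; same headers as the examples below):

// function-only callback: pairs with mincgcreatef() (numerical differentiation)
void f_only(const real_1d_array &x, double &func, void *ptr)
{
    func = x[0]*x[0];
}
// function+gradient callback: pairs with mincgcreate() (analytic gradient)
void f_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    func = x[0]*x[0];
    grad[0] = 2*x[0];
}
void pairing_demo()
{
    real_1d_array x = "[5]";
    mincgstate s1, s2;

    mincgcreatef(x, 1.0e-6, s1);           // created for numerical differentiation...
    alglib::mincgoptimize(s1, f_only);     // ...so pass the function-only callback

    mincgcreate(x, s2);                    // created for analytic gradient...
    alglib::mincgoptimize(s2, f_grad);     // ...so pass the function+gradient callback
}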
    +
    /************************************************************************* +This subroutine submits request for termination of running optimizer. It +should be called from user-supplied callback when user decides that it is +time to "smoothly" terminate optimization process. As result, optimizer +stops at point which was "current accepted" when termination request was +submitted and returns error code 8 (successful termination). -* if you see that algorithm converges to vicinity of solution, but - additional outer iterations do not refine solution, it may mean that - algorithm is unstable - it wanders around true solution, but can not - approach it. Sometimes algorithm may be stabilized by increasing Rho one - more time, making it 5x or 10x larger. +INPUT PARAMETERS: + State - optimizer structure -SCALING OF CONSTRAINTS [IMPORTANT] +NOTE: after request for termination optimizer may perform several + additional calls to user-supplied callbacks. It does NOT guarantee + to stop immediately - it just guarantees that these additional calls + will be discarded later. -AUL optimizer scales variables according to scale specified by -MinNLCSetScale() function, so it can handle problems with badly scaled -variables (as long as we KNOW their scales). However, because function -being optimized is a mix of original function and constraint-dependent -penalty functions, it is important to rescale both variables AND -constraints. +NOTE: calling this function on optimizer which is NOT running will have no + effect. -Say, if you minimize f(x)=x^2 subject to 1000000*x>=0, then you have -constraint whose scale is different from that of target function (another -example is 0.000001*x>=0). It is also possible to have constraints whose -scales are misaligned: 1000000*x0>=0, 0.000001*x1<=0. Inappropriate -scaling may ruin convergence because minimizing x^2 subject to x>=0 is NOT -same as minimizing it subject to 1000000*x>=0. +NOTE: multiple calls to this function are possible. First call is counted, + subsequent calls are silently ignored. -Because we know coefficients of boundary/linear constraints, we can -automatically rescale and normalize them. However, there is no way to -automatically rescale nonlinear constraints Gi(x) and Hi(x) - they are -black boxes. + -- ALGLIB -- + Copyright 08.10.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mincgrequesttermination( + mincgstate state, + const xparams _params = alglib::xdefault); -It means that YOU are the one who is responsible for correct scaling of -nonlinear constraints Gi(x) and Hi(x). We recommend you to rescale -nonlinear constraints in such way that I-th component of dG/dX (or dH/dx) -has magnitude approximately equal to 1/S[i] (where S is a scale set by -MinNLCSetScale() function). +
    + +
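A sketch of how this request is typically issued from inside a rep() callback (hypothetical names; the state object is passed through the generic void* user pointer, and per-iteration reports are enabled with mincgsetxrep(), documented below):

void quad_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    func = x[0]*x[0] + x[1]*x[1];
    grad[0] = 2*x[0];
    grad[1] = 2*x[1];
}
// rep() callback: asks for a smooth stop once a (made-up) target threshold is reached
void stop_when_good_enough(const real_1d_array &x, double func, void *ptr)
{
    mincgstate *state = (mincgstate*)ptr;
    if( func<1.0E-3 )
        mincgrequesttermination(*state);   // optimizer will finish with code 8
}
void run_with_termination_request()
{
    real_1d_array x = "[1,1]";
    mincgstate state;
    mincgcreate(x, state);
    mincgsetxrep(state, true);             // rep() is only called when reports are enabled
    alglib::mincgoptimize(state, quad_grad, stop_when_good_enough, &state);
    mincgreport rep;
    mincgresults(state, x, rep);           // rep.terminationtype==8 after a user request
}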
    +
    /************************************************************************* +This subroutine restarts CG algorithm from new point. All optimization +parameters are left unchanged. -WHAT IF IT DOES NOT CONVERGE? +This function allows to solve multiple optimization problems (which +must have same number of dimensions) without object reallocation penalty. -It is possible that AUL algorithm fails to converge to precise values of -Lagrange multipliers. It stops somewhere around true solution, but candidate -point is still too far from solution, and some constraints are violated. -Such kind of failure is specific for Lagrangian algorithms - technically, -they stop at some point, but this point is not constrained solution. +INPUT PARAMETERS: + State - structure used to store algorithm state. + X - new starting point. -There are exist several reasons why algorithm may fail to converge: -a) too loose stopping criteria for inner iteration -b) degenerate, redundant constraints -c) target function has unconstrained extremum exactly at the boundary of - some constraint -d) numerical noise in the target function + -- ALGLIB -- + Copyright 30.07.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mincgrestartfrom( + mincgstate state, + real_1d_array x, + const xparams _params = alglib::xdefault); -In all these cases algorithm is unstable - each outer iteration results in -large and almost random step which improves handling of some constraints, -but violates other ones (ideally outer iterations should form a sequence -of progressively decreasing steps towards solution). +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +Conjugate gradient results
-First reason possible is that too loose stopping criteria for inner
-iteration were specified. Augmented Lagrangian algorithm solves a sequence
-of intermediate problems, and requries each of them to be solved with high
-precision. Insufficient precision results in incorrect update of Lagrange
-multipliers.
+INPUT PARAMETERS:
+    State   -   algorithm state
-Another reason is that you may have specified degenerate constraints: say,
-some constraint was repeated twice. In most cases AUL algorithm gracefully
-handles such situations, but sometimes it may spend too much time figuring
-out subtle degeneracies in constraint matrix.
+OUTPUT PARAMETERS:
+    X       -   array[0..N-1], solution
+    Rep     -   optimization report:
+                * Rep.TerminationType completion code:
+                    * -8    internal integrity control detected infinite
+                            or NAN values in function/gradient. Abnormal
+                            termination signalled.
+                    * -7    gradient verification failed.
+                            See MinCGSetGradientCheck() for more information.
+                    *  1    relative function improvement is no more than
+                            EpsF.
+                    *  2    relative step is no more than EpsX.
+                    *  4    gradient norm is no more than EpsG
+                    *  5    MaxIts steps were taken
+                    *  7    stopping conditions are too stringent,
+                            further improvement is impossible,
+                            we return best X found so far
+                    *  8    terminated by user
+                * Rep.IterationsCount contains iterations count
+                * NFEV contains number of function calculations
-Third reason is tricky and hard to diagnose. Consider situation when you
-minimize f=x^2 subject to constraint x>=0. Unconstrained extremum is
-located exactly at the boundary of constrained area. In this case
-algorithm will tend to oscillate between negative and positive x. Each
-time it stops at x<0 it "reinforces" constraint x>=0, and each time it is
-bounced to x>0 it "relaxes" constraint (and is attracted to x<0).
+ -- ALGLIB --
+ Copyright 20.04.2009 by Bochkanov Sergey
+*************************************************************************/
-
    void alglib::mincgresults( + mincgstate state, + real_1d_array& x, + mincgreport& rep, + const xparams _params = alglib::xdefault); -Such situation sometimes happens in problems with hidden symetries. -Algorithm is got caught in a loop with Lagrange multipliers being -continuously increased/decreased. Luckily, such loop forms after at least -three iterations, so this problem can be solved by DECREASING number of -outer iterations down to 1-2 and increasing penalty coefficient Rho as -much as possible. +
    +

    Examples:   [1]  [2]  [3]  

    + +
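A short sketch of checking the completion code after retrieving results (hypothetical helper report_outcome; positive codes are the successful outcomes listed above):

void report_outcome(mincgstate &state)
{
    real_1d_array x;
    mincgreport rep;
    mincgresults(state, x, rep);
    if( rep.terminationtype>0 )            // 1, 2, 4, 5, 7, 8: solution in x is usable
        printf("done: %d iterations, completion code %d\n",
               (int)rep.iterationscount, (int)rep.terminationtype);
    else                                   // -7, -8: do not trust x
        printf("solver failed, completion code %d\n", (int)rep.terminationtype);
}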
    +
    /************************************************************************* +Conjugate gradient results -Final reason is numerical noise. AUL algorithm is robust against moderate -noise (more robust than, say, active set methods), but large noise may -destabilize algorithm. +Buffered implementation of MinCGResults(), which uses pre-allocated buffer +to store X[]. If buffer size is too small, it resizes buffer. It is +intended to be used in the inner cycles of performance critical algorithms +where array reallocation penalty is too large to be ignored. -- ALGLIB -- - Copyright 06.06.2014 by Bochkanov Sergey + Copyright 20.04.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcsetalgoaul( - minnlcstate state, - double rho, - ae_int_t itscnt); +
    void alglib::mincgresultsbuf( + mincgstate state, + real_1d_array& x, + mincgreport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - +
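A sketch of the intended usage pattern, reusing one state object and one pre-allocated output buffer across several solves (hypothetical helper; a 2-dimensional problem and a caller-supplied gradient callback are assumed):

void solve_from_two_starts(mincgstate &state,
    void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr))
{
    real_1d_array start1 = "[ 5, 5]";
    real_1d_array start2 = "[-5,-5]";
    const real_1d_array *starts[2] = { &start1, &start2 };
    real_1d_array xbuf;
    xbuf.setlength(2);                     // allocate the output buffer once (N=2 assumed)
    mincgreport rep;
    for(int i=0; i<2; i++)
    {
        mincgrestartfrom(state, *starts[i]);   // same state, new starting point
        alglib::mincgoptimize(state, grad);
        mincgresultsbuf(state, xbuf, rep);     // solution written into the reused buffer
    }
}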
     
    /************************************************************************* -This function sets boundary constraints for NLC optimizer. - -Boundary constraints are inactive by default (after initial creation). -They are preserved after algorithm restart with MinNLCRestartFrom(). - -You may combine boundary constraints with general linear ones - and with -nonlinear ones! Boundary constraints are handled more efficiently than -other types. Thus, if your problem has mixed constraints, you may -explicitly specify some of them as boundary and save some time/space. +This function sets CG algorithm. INPUT PARAMETERS: - State - structure stores algorithm state - BndL - lower bounds, array[N]. - If some (all) variables are unbounded, you may specify - very small number or -INF. - BndU - upper bounds, array[N]. - If some (all) variables are unbounded, you may specify - very large number or +INF. - -NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th -variable will be "frozen" at X[i]=BndL[i]=BndU[i]. - -NOTE 2: when you solve your problem with augmented Lagrangian solver, - boundary constraints are satisfied only approximately! It is - possible that algorithm will evaluate function outside of - feasible area! + State - structure which stores algorithm state + CGType - algorithm type: + * -1 automatic selection of the best algorithm + * 0 DY (Dai and Yuan) algorithm + * 1 Hybrid DY-HS algorithm -- ALGLIB -- - Copyright 06.06.2014 by Bochkanov Sergey + Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcsetbc( - minnlcstate state, - real_1d_array bndl, - real_1d_array bndu); +
    void alglib::mincgsetcgtype( + mincgstate state, + ae_int_t cgtype, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets stopping conditions for inner iterations of optimizer. +This function sets stopping conditions for CG optimization algorithm.
 
 INPUT PARAMETERS:
     State   -   structure which stores algorithm state
@@ -26878,7 +28503,7 @@
                 * |.| means Euclidian norm
                 * v - scaled gradient vector, v[i]=g[i]*s[i]
                 * g - gradient
-                * s - scaling coefficients set by MinNLCSetScale()
+                * s - scaling coefficients set by MinCGSetScale()
     EpsF    -   >=0
                 The subroutine finishes its work if on k+1-th iteration
                 the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
@@ -26888,333 +28513,230 @@
                 the condition |v|<=EpsX is fulfilled, where:
                 * |.| means Euclidian norm
                 * v - scaled step vector, v[i]=dx[i]/s[i]
-                * dx - step vector, dx=X(k+1)-X(k)
-                * s - scaling coefficients set by MinNLCSetScale()
+                * dx - step vector, dx=X(k+1)-X(k)
+                * s - scaling coefficients set by MinCGSetScale()
     MaxIts  -   maximum number of iterations. If MaxIts=0, the number of
                 iterations is unlimited.
-Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead
-to automatic stopping criterion selection.
+Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
+automatic stopping criterion selection (small EpsX).
 -- ALGLIB --
- Copyright 06.06.2014 by Bochkanov Sergey
+ Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::minnlcsetcond( - minnlcstate state, +
    void alglib::mincgsetcond( + mincgstate state, double epsg, double epsf, double epsx, - ae_int_t maxits); + ae_int_t maxits, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - +

    Examples:   [1]  [2]  [3]  

    +
     
    /************************************************************************* -This subroutine turns on verification of the user-supplied analytic -gradient: -* user calls this subroutine before optimization begins -* MinNLCOptimize() is called -* prior to actual optimization, for each component of parameters being - optimized X[i] algorithm performs following steps: - * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], - where X[i] is i-th component of the initial point and S[i] is a scale - of i-th parameter - * F(X) is evaluated at these trial points - * we perform one more evaluation in the middle point of the interval - * we build cubic model using function values and derivatives at trial - points and we compare its prediction with actual value in the middle - point - * in case difference between prediction and actual value is higher than - some predetermined threshold, algorithm stops with completion code -7; - Rep.VarIdx is set to index of the parameter with incorrect derivative, - and Rep.FuncIdx is set to index of the function. -* after verification is over, algorithm proceeds to the actual optimization. - -NOTE 1: verification needs N (parameters count) gradient evaluations. It - is very costly and you should use it only for low dimensional - problems, when you want to be sure that you've correctly - calculated analytic derivatives. You should not use it in the - production code (unless you want to check derivatives provided by - some third party). - -NOTE 2: you should carefully choose TestStep. Value which is too large - (so large that function behaviour is significantly non-cubic) will - lead to false alarms. You may use different step for different - parameters by means of setting scale with MinNLCSetScale(). - -NOTE 3: this function may lead to false positives. In case it reports that - I-th derivative was calculated incorrectly, you may decrease test - step and try one more time - maybe your function changes too - sharply and your step is too large for such rapidly chanding - function. +Modification of the preconditioner: preconditioning is turned off. INPUT PARAMETERS: - State - structure used to store algorithm state - TestStep - verification step: - * TestStep=0 turns verification off - * TestStep>0 activates verification + State - structure which stores algorithm state + +NOTE: you can change preconditioner "on the fly", during algorithm +iterations. -- ALGLIB -- - Copyright 15.06.2014 by Bochkanov Sergey + Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcsetgradientcheck(minnlcstate state, double teststep); +
    void alglib::mincgsetprecdefault( + mincgstate state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets linear constraints for MinNLC optimizer. +Modification of the preconditioner: diagonal of approximate Hessian is +used. -Linear constraints are inactive by default (after initial creation). They -are preserved after algorithm restart with MinNLCRestartFrom(). +INPUT PARAMETERS: + State - structure which stores algorithm state + D - diagonal of the approximate Hessian, array[0..N-1], + (if larger, only leading N elements are used). -You may combine linear constraints with boundary ones - and with nonlinear -ones! If your problem has mixed constraints, you may explicitly specify -some of them as linear. It may help optimizer to handle them more -efficiently. +NOTE: you can change preconditioner "on the fly", during algorithm +iterations. -INPUT PARAMETERS: - State - structure previously allocated with MinNLCCreate call. - C - linear constraints, array[K,N+1]. - Each row of C represents one constraint, either equality - or inequality (see below): - * first N elements correspond to coefficients, - * last element corresponds to the right part. - All elements of C (including right part) must be finite. - CT - type of constraints, array[K]: - * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] - * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] - * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] - K - number of equality/inequality constraints, K>=0: - * if given, only leading K elements of C/CT are used - * if not given, automatically determined from sizes of C/CT +NOTE 2: D[i] should be positive. Exception will be thrown otherwise. -NOTE 1: when you solve your problem with augmented Lagrangian solver, - linear constraints are satisfied only approximately! It is - possible that algorithm will evaluate function outside of - feasible area! +NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. -- ALGLIB -- - Copyright 06.06.2014 by Bochkanov Sergey + Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcsetlc( - minnlcstate state, - real_2d_array c, - integer_1d_array ct); -void alglib::minnlcsetlc( - minnlcstate state, - real_2d_array c, - integer_1d_array ct, - ae_int_t k); +
    void alglib::mincgsetprecdiag( + mincgstate state, + real_1d_array d, + const xparams _params = alglib::xdefault);
    - +
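A two-line sketch (hypothetical diagonal values) of supplying an approximate Hessian diagonal; note that it is the diagonal itself, not its inverse, and all entries must be positive:

void use_diag_preconditioner(mincgstate &state)
{
    real_1d_array d = "[100.0, 1.0]";   // assumed curvature estimates for x0 and x1
    mincgsetprecdiag(state, d);         // may also be changed later, "on the fly"
}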
     
    /************************************************************************* -This function sets nonlinear constraints for MinNLC optimizer.
-
-In fact, this function sets NUMBER of nonlinear constraints. Constraints
-itself (constraint functions) are passed to MinNLCOptimize() method. This
-method requires user-defined vector function F[] and its Jacobian J[],
-where:
-* first component of F[] and first row of Jacobian J[] corresponds to
-  function being minimized
-* next NLEC components of F[] (and rows of J) correspond to nonlinear
-  equality constraints G_i(x)=0
-* next NLIC components of F[] (and rows of J) correspond to nonlinear
-  inequality constraints H_i(x)<=0
-
-NOTE: you may combine nonlinear constraints with linear/boundary ones. If
-      your problem has mixed constraints, you may explicitly specify some
-      of them as linear ones. It may help optimizer to handle them more
-      efficiently.
-
-INPUT PARAMETERS:
-    State   -   structure previously allocated with MinNLCCreate call.
-    NLEC    -   number of Non-Linear Equality Constraints (NLEC), >=0
-    NLIC    -   number of Non-Linear Inquality Constraints (NLIC), >=0
+Modification of the preconditioner: scale-based diagonal preconditioning.
-NOTE 1: when you solve your problem with augmented Lagrangian solver,
-        nonlinear constraints are satisfied only approximately! It is
-        possible that algorithm will evaluate function outside of
-        feasible area!
+This preconditioning mode can be useful when you don't have approximate
+diagonal of Hessian, but you know that your variables are badly scaled
+(for example, one variable is in [1,10], and another in [1000,100000]),
+and most part of the ill-conditioning comes from different scales of vars.
-NOTE 2: algorithm scales variables according to scale specified by
-        MinNLCSetScale() function, so it can handle problems with badly
-        scaled variables (as long as we KNOW their scales).
+In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
+can greatly improve convergence.
-        However, there is no way to automatically scale nonlinear
-        constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may
-        ruin convergence. Solving problem with constraint "1000*G0(x)=0"
-        is NOT same as solving it with constraint "0.001*G0(x)=0".
+IMPORTANT: you should set scale of your variables with MinCGSetScale() call
+(before or after MinCGSetPrecScale() call). Without knowledge of the scale
+of your variables scale-based preconditioner will be just unit matrix.
-        It means that YOU are the one who is responsible for correct
-        scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you
-        to scale nonlinear constraints in such way that I-th component of
-        dG/dX (or dH/dx) has approximately unit magnitude (for problems
-        with unit scale) or has magnitude approximately equal to 1/S[i]
-        (where S is a scale set by MinNLCSetScale() function).
+INPUT PARAMETERS:
+    State   -   structure which stores algorithm state
+NOTE: you can change preconditioner "on the fly", during algorithm
+iterations.
 -- ALGLIB --
- Copyright 06.06.2014 by Bochkanov Sergey
+ Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::minnlcsetnlc( - minnlcstate state, - ae_int_t nlec, - ae_int_t nlic); +
    void alglib::mincgsetprecscale( + mincgstate state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - +
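A sketch of the recommended call order (hypothetical scale values): set the variable scales first, otherwise the scale-based preconditioner is just the unit matrix:

void use_scale_preconditioner(mincgstate &state)
{
    real_1d_array s = "[1.0, 1000.0]";  // assumed typical magnitudes of x0 and x1
    mincgsetscale(state, s);
    mincgsetprecscale(state);           // internally uses H[i] = 1/(s[i]^2)
}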
     
    /************************************************************************* -This function sets preconditioner to "exact low rank" mode. +This function sets scaling coefficients for CG optimizer. -Preconditioning is very important for convergence of Augmented Lagrangian -algorithm because presence of penalty term makes problem ill-conditioned. -Difference between performance of preconditioned and unpreconditioned -methods can be as large as 100x! +ALGLIB optimizers use scaling matrices to test stopping conditions (step +size and gradient are scaled before comparison with tolerances). Scale of +the I-th variable is a translation invariant measure of: +a) "how large" the variable is +b) how large the step should be to make significant changes in the function -MinNLC optimizer may utilize two preconditioners, each with its own -benefits and drawbacks: a) inexact LBFGS-based, and b) exact low rank one. -It also provides special unpreconditioned mode of operation which can be -used for test purposes. Comments below discuss low rank preconditioner. +Scaling is also used by finite difference variant of CG optimizer - step +along I-th axis is equal to DiffStep*S[I]. -Exact low-rank preconditioner uses Woodbury matrix identity to build -quadratic model of the penalized function. It has no special assumptions -about orthogonality, so it is quite general. However, for a N-dimensional -problem with K general linear or nonlinear constraints (boundary ones are -not counted) it has O(N*K^2) cost per iteration (for comparison: inexact -LBFGS-based preconditioner has O(N*K) cost). +In most optimizers (and in the CG too) scaling is NOT a form of +preconditioning. It just affects stopping conditions. You should set +preconditioner by separate call to one of the MinCGSetPrec...() functions. + +There is special preconditioning mode, however, which uses scaling +coefficients to form diagonal preconditioning matrix. You can turn this +mode on, if you want. But you should understand that scaling is not the +same thing as preconditioning - these are two different, although related +forms of tuning solver. INPUT PARAMETERS: State - structure stores algorithm state - UpdateFreq- update frequency. Preconditioner is rebuilt after every - UpdateFreq iterations. Recommended value: 10 or higher. - Zero value means that good default value will be used. + S - array[N], non-zero scaling coefficients + S[i] may be negative, sign doesn't matter. -- ALGLIB -- - Copyright 26.09.2014 by Bochkanov Sergey + Copyright 14.01.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcsetprecexactlowrank( - minnlcstate state, - ae_int_t updatefreq); +
    void alglib::mincgsetscale( + mincgstate state, + real_1d_array s, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  

    - +
     
    /************************************************************************* -This function sets preconditioner to "inexact LBFGS-based" mode. - -Preconditioning is very important for convergence of Augmented Lagrangian -algorithm because presence of penalty term makes problem ill-conditioned. -Difference between performance of preconditioned and unpreconditioned -methods can be as large as 100x! - -MinNLC optimizer may utilize two preconditioners, each with its own -benefits and drawbacks: a) inexact LBFGS-based, and b) exact low rank one. -It also provides special unpreconditioned mode of operation which can be -used for test purposes. Comments below discuss LBFGS-based preconditioner. - -Inexact LBFGS-based preconditioner uses L-BFGS formula combined with -orthogonality assumption to perform very fast updates. For a N-dimensional -problem with K general linear or nonlinear constraints (boundary ones are -not counted) it has O(N*K) cost per iteration. This preconditioner has -best quality (less iterations) when general linear and nonlinear -constraints are orthogonal to each other (orthogonality with respect to -boundary constraints is not required). Number of iterations increases when -constraints are non-orthogonal, because algorithm assumes orthogonality, -but still it is better than no preconditioner at all. +This function sets maximum step length INPUT PARAMETERS: - State - structure stores algorithm state + State - structure which stores algorithm state + StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't + want to limit step length. + +Use this subroutine when you optimize target function which contains exp() +or other fast growing functions, and optimization algorithm makes too +large steps which leads to overflow. This function allows us to reject +steps that are too large (and therefore expose us to the possible +overflow) without actually calculating function value at the x+stp*d. -- ALGLIB -- - Copyright 26.09.2014 by Bochkanov Sergey + Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcsetprecinexact(minnlcstate state); +
    void alglib::mincgsetstpmax( + mincgstate state, + double stpmax, + const xparams _params = alglib::xdefault);
    - +
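A sketch of the overflow scenario described above (made-up target containing exp(); assumes math.h in addition to the headers used by the examples below):

void exp_target(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    func = exp(x[0]) + x[0]*x[0];
    grad[0] = exp(x[0]) + 2*x[0];
}
void minimize_exp_target()
{
    real_1d_array x = "[0]";
    mincgstate state;
    mincgcreate(x, state);
    mincgsetstpmax(state, 10.0);        // no trial point further than 10 from the current one
    alglib::mincgoptimize(state, exp_target);
    mincgreport rep;
    mincgresults(state, x, rep);
}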
     
    /************************************************************************* -This function sets preconditioner to "turned off" mode. - -Preconditioning is very important for convergence of Augmented Lagrangian -algorithm because presence of penalty term makes problem ill-conditioned. -Difference between performance of preconditioned and unpreconditioned -methods can be as large as 100x! - -MinNLC optimizer may utilize two preconditioners, each with its own -benefits and drawbacks: a) inexact LBFGS-based, and b) exact low rank one. -It also provides special unpreconditioned mode of operation which can be -used for test purposes. - -This function activates this test mode. Do not use it in production code -to solve real-life problems. +This function turns on/off reporting. INPUT PARAMETERS: - State - structure stores algorithm state + State - structure which stores algorithm state + NeedXRep- whether iteration reports are needed or not + +If NeedXRep is True, algorithm will call rep() callback function if it is +provided to MinCGOptimize(). -- ALGLIB -- - Copyright 26.09.2014 by Bochkanov Sergey + Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcsetprecnone(minnlcstate state); +
    void alglib::mincgsetxrep( + mincgstate state, + bool needxrep, + const xparams _params = alglib::xdefault);
    - +
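A sketch of a per-iteration progress report (hypothetical names; reports are delivered to the rep() callback passed to MinCGOptimize() only after this switch is turned on):

void print_progress(const real_1d_array &x, double func, void *ptr)
{
    printf("f = %.6e at x = %s\n", func, x.tostring(4).c_str());
}
void run_with_reports(mincgstate &state,
    void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr))
{
    mincgsetxrep(state, true);                        // enable per-iteration reports
    alglib::mincgoptimize(state, grad, print_progress, NULL);
}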
     
    /************************************************************************* -This function sets scaling coefficients for NLC optimizer. +This function allows to suggest initial step length to the CG algorithm.
-ALGLIB optimizers use scaling matrices to test stopping conditions (step
-size and gradient are scaled before comparison with tolerances). Scale of
-the I-th variable is a translation invariant measure of:
-a) "how large" the variable is
-b) how large the step should be to make significant changes in the function
+Suggested step length is used as starting point for the line search. It
+can be useful when you have badly scaled problem, i.e. when ||grad||
+(which is used as initial estimate for the first step) is many orders of
+magnitude different from the desired step.
-Scaling is also used by finite difference variant of the optimizer - step
-along I-th axis is equal to DiffStep*S[I].
+Line search may fail on such problems without good estimate of initial
+step length. Imagine, for example, problem with ||grad||=10^50 and desired
+step equal to 0.1. Line search function will use 10^50 as initial step,
+then it will decrease step length by 2 (up to 20 attempts) and will get
+10^44, which is still too large.
-INPUT PARAMETERS:
-    State   -   structure stores algorithm state
-    S       -   array[N], non-zero scaling coefficients
-                S[i] may be negative, sign doesn't matter.
+This function allows us to tell that line search should be started from
+some moderate step length, like 1.0, so algorithm will be able to detect
+desired step length in several searches.
-
- -- ALGLIB --
- Copyright 06.06.2014 by Bochkanov Sergey
-*************************************************************************/
-
    void alglib::minnlcsetscale(minnlcstate state, real_1d_array s); +Default behavior (when no step is suggested) is to use preconditioner, if +it is available, to generate initial estimate of step length. -
    -

    Examples:   [1]  [2]  [3]  

    - -
    -
    /************************************************************************* -This function turns on/off reporting. +This function influences only first iteration of algorithm. It should be +called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call. +Suggested step is ignored if you have preconditioner. INPUT PARAMETERS: - State - structure which stores algorithm state - NeedXRep- whether iteration reports are needed or not - -If NeedXRep is True, algorithm will call rep() callback function if it is -provided to MinNLCOptimize(). - -NOTE: algorithm passes two parameters to rep() callback - current point - and penalized function value at current point. Important - function - value which is returned is NOT function being minimized. It is sum - of the value of the function being minimized - and penalty term. + State - structure used to store algorithm state. + Stp - initial estimate of the step length. + Can be zero (no estimate). -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnlcsetxrep(minnlcstate state, bool needxrep); +
    void alglib::mincgsuggeststep( + mincgstate state, + double stp, + const xparams _params = alglib::xdefault);
    - +
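A one-call sketch of the usage described above (hypothetical helper): the hint affects only the first line search and is ignored when a preconditioner is set:

void run_with_step_hint(mincgstate &state,
    void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr))
{
    mincgsuggeststep(state, 1.0);       // start the very first line search from step 1.0
    alglib::mincgoptimize(state, grad);
}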
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -27223,22 +28745,15 @@
     #include "optimization.h"
     
     using namespace alglib;
    -void  nlcfunc1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
     {
         //
    -    // this callback calculates
    -    //
    -    //     f0(x0,x1) = -x0+x1
    -    //     f1(x0,x1) = x0^2+x1^2-1
    -    //
    -    // and Jacobian matrix J = [dfi/dxj]
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
    +    // and its derivatives df/dx0 and df/dx1
         //
    -    fi[0] = -x[0]+x[1];
    -    fi[1] = x[0]*x[0] + x[1]*x[1] - 1.0;
    -    jac[0][0] = -1.0;
    -    jac[0][1] = +1.0;
    -    jac[1][0] = 2*x[0];
    -    jac[1][1] = 2*x[1];
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    +    grad[0] = 400*pow(x[0]+3,3);
    +    grad[1] = 4*pow(x[1]-3,3);
     }
     
     int main(int argc, char **argv)
    @@ -27246,88 +28761,75 @@
         //
         // This example demonstrates minimization of
         //
    -    //     f(x0,x1) = -x0+x1
    +    //     f(x,y) = 100*(x+3)^4+(y-3)^4
         //
    -    // subject to nonlinear equality constraint
    +    // using nonlinear conjugate gradient method with:
    +    // * initial point x=[0,0]
    +    // * unit scale being set for all variables (see mincgsetscale for more info)
    +    // * stopping criteria set to "terminate after short enough step"
    +    // * OptGuard integrity check being used to check problem statement
    +    //   for some common errors like nonsmoothness or bad analytic gradient
         //
    -    //    x0^2 + x1^2 - 1 = 0
    +    // First, we create optimizer object and tune its properties
         //
    -    real_1d_array x0 = "[0,0]";
    +    real_1d_array x = "[0,0]";
         real_1d_array s = "[1,1]";
         double epsg = 0;
         double epsf = 0;
    -    double epsx = 0.000001;
    +    double epsx = 0.0000000001;
         ae_int_t maxits = 0;
    -    ae_int_t outerits = 5;
    -    ae_int_t updatefreq = 10;
    -    double rho = 1000;
    -    minnlcstate state;
    -    minnlcreport rep;
    -    real_1d_array x1;
    +    mincgstate state;
    +    mincgcreate(x, state);
    +    mincgsetcond(state, epsg, epsf, epsx, maxits);
    +    mincgsetscale(state, s);
     
         //
    -    // Create optimizer object, choose AUL algorithm and tune its settings:
    -    // * rho=1000       penalty coefficient
    -    // * outerits=5     number of outer iterations to tune Lagrange coefficients
    -    // * epsx=0.000001  stopping condition for inner iterations
    -    // * s=[1,1]        all variables have unit scale
    -    // * exact low-rank preconditioner is used, updated after each 10 iterations
    -    //
    -    minnlccreate(2, x0, state);
    -    minnlcsetalgoaul(state, rho, outerits);
    -    minnlcsetcond(state, epsg, epsf, epsx, maxits);
    -    minnlcsetscale(state, s);
    -    minnlcsetprecexactlowrank(state, updatefreq);
    -
    +    // Activate OptGuard integrity checking.
         //
    -    // Set constraints:
    +    // OptGuard monitor helps to catch common coding and problem statement
    +    // issues, like:
    +    // * discontinuity of the target function (C0 continuity violation)
    +    // * nonsmoothness of the target function (C1 continuity violation)
    +    // * erroneous analytic gradient, i.e. one inconsistent with actual
    +    //   change in the target/constraints
         //
    -    // Nonlinear constraints are tricky - you can not "pack" general
    -    // nonlinear function into double precision array. That's why
    -    // minnlcsetnlc() does not accept constraints itself - only constraint
    -    // counts are passed: first parameter is number of equality constraints,
    -    // second one is number of inequality constraints.
    +    // OptGuard is essential for early prototyping stages because such
    +    // problems often result in premature termination of the optimizer
    +    // which is really hard to distinguish from the correct termination.
         //
    -    // As for constraining functions - these functions are passed as part
    -    // of problem Jacobian (see below).
    +    // IMPORTANT: GRADIENT VERIFICATION IS PERFORMED BY MEANS OF NUMERICAL
    +    //            DIFFERENTIATION. DO NOT USE IT IN PRODUCTION CODE!!!!!!!
         //
    -    // NOTE: MinNLC optimizer supports arbitrary combination of boundary, general
    -    //       linear and general nonlinear constraints. This example does not
    -    //       show how to work with general linear constraints, but you can
    -    //       easily find it in documentation on minnlcsetbc() and
    -    //       minnlcsetlc() functions.
    +    //            Other OptGuard checks add moderate overhead, but anyway
    +    //            it is better to turn them off when they are not needed.
         //
    -    minnlcsetnlc(state, 1, 0);
    +    mincgoptguardsmoothness(state);
    +    mincgoptguardgradient(state, 0.001);
     
         //
    -    // Optimize and test results.
    -    //
    -    // Optimizer object accepts vector function and its Jacobian, with first
    -    // component (Jacobian row) being target function, and next components
    -    // (Jacobian rows) being nonlinear equality and inequality constraints.
    -    //
    -    // So, our vector function has form
    -    //
    -    //     {f0,f1} = { -x0+x1 , x0^2+x1^2-1 }
    +    // Optimize and evaluate results
         //
    -    // with Jacobian
    +    mincgreport rep;
    +    alglib::mincgoptimize(state, function1_grad);
    +    mincgresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    +
         //
    -    //         [  -1    +1  ]
    -    //     J = [            ]
    -    //         [ 2*x0  2*x1 ]
    +    // Check that OptGuard did not report errors
         //
    -    // with f0 being target function, f1 being constraining function. Number
    -    // of equality/inequality constraints is specified by minnlcsetnlc(),
    -    // with equality ones always being first, inequality ones being last.
    +    // NOTE: want to test OptGuard? Try breaking the gradient - say, add
    +    //       1.0 to some of its components.
         //
    -    alglib::minnlcoptimize(state, nlcfunc1_jac);
    -    minnlcresults(state, x1, rep);
    -    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [0.70710,-0.70710]
    +    optguardreport ogrep;
    +    mincgoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
         return 0;
     }
     
     
    -
    +
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -27336,119 +28838,80 @@
     #include "optimization.h"
     
     using namespace alglib;
    -void  nlcfunc1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
     {
         //
    -    // this callback calculates
    -    //
    -    //     f0(x0,x1) = -x0+x1
    -    //     f1(x0,x1) = x0^2+x1^2-1
    -    //
    -    // and Jacobian matrix J = [dfi/dxj]
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
    +    // and its derivatives df/dx0 and df/dx1
         //
    -    fi[0] = -x[0]+x[1];
    -    fi[1] = x[0]*x[0] + x[1]*x[1] - 1.0;
    -    jac[0][0] = -1.0;
    -    jac[0][1] = +1.0;
    -    jac[1][0] = 2*x[0];
    -    jac[1][1] = 2*x[1];
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    +    grad[0] = 400*pow(x[0]+3,3);
    +    grad[1] = 4*pow(x[1]-3,3);
     }
     
     int main(int argc, char **argv)
     {
         //
    -    // This example demonstrates minimization of
    -    //
    -    //     f(x0,x1) = -x0+x1
    -    //
    -    // subject to boundary constraints
    -    //
    -    //    x0>=0, x1>=0
    -    //
    -    // and nonlinear inequality constraint
    +    // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4
    +    // with nonlinear conjugate gradient method.
         //
    -    //    x0^2 + x1^2 - 1 <= 0
    +    // Several advanced techniques are demonstrated:
    +    // * upper limit on step size
    +    // * restart from new point
         //
    -    real_1d_array x0 = "[0,0]";
    +    real_1d_array x = "[0,0]";
         real_1d_array s = "[1,1]";
         double epsg = 0;
         double epsf = 0;
    -    double epsx = 0.000001;
    +    double epsx = 0.0000000001;
    +    double stpmax = 0.1;
         ae_int_t maxits = 0;
    -    ae_int_t outerits = 5;
    -    ae_int_t updatefreq = 10;
    -    double rho = 1000;
    -    real_1d_array bndl = "[0,0]";
    -    real_1d_array bndu = "[+inf,+inf]";
    -    minnlcstate state;
    -    minnlcreport rep;
    -    real_1d_array x1;
    +    mincgstate state;
    +    mincgreport rep;
     
    -    //
    -    // Create optimizer object, choose AUL algorithm and tune its settings:
    -    // * rho=1000       penalty coefficient
    -    // * outerits=5     number of outer iterations to tune Lagrange coefficients
    -    // * epsx=0.000001  stopping condition for inner iterations
    -    // * s=[1,1]        all variables have unit scale
    -    // * exact low-rank preconditioner is used, updated after each 10 iterations
    -    //
    -    minnlccreate(2, x0, state);
    -    minnlcsetalgoaul(state, rho, outerits);
    -    minnlcsetcond(state, epsg, epsf, epsx, maxits);
    -    minnlcsetscale(state, s);
    -    minnlcsetprecexactlowrank(state, updatefreq);
    +    // create and tune optimizer
    +    mincgcreate(x, state);
    +    mincgsetscale(state, s);
    +    mincgsetcond(state, epsg, epsf, epsx, maxits);
    +    mincgsetstpmax(state, stpmax);
     
    +    // Set up OptGuard integrity checker which catches errors
    +    // like nonsmooth targets or errors in the analytic gradient.
         //
    -    // Set constraints:
    -    //
    -    // 1. boundary constraints are passed with minnlcsetbc() call
    -    //
    -    // 2. nonlinear constraints are more tricky - you can not "pack" general
    -    //    nonlinear function into double precision array. That's why
    -    //    minnlcsetnlc() does not accept constraints itself - only constraint
    -    //    counts are passed: first parameter is number of equality constraints,
    -    //    second one is number of inequality constraints.
    -    //
    -    //    As for constraining functions - these functions are passed as part
    -    //    of problem Jacobian (see below).
    -    //
    -    // NOTE: MinNLC optimizer supports arbitrary combination of boundary, general
    -    //       linear and general nonlinear constraints. This example does not
    -    //       show how to work with general linear constraints, but you can
    -    //       easily find it in documentation on minnlcsetlc() function.
    +    // OptGuard is essential at the early prototyping stages.
         //
    -    minnlcsetbc(state, bndl, bndu);
    -    minnlcsetnlc(state, 0, 1);
    +    // NOTE: gradient verification needs 3*N additional function
    +    //       evaluations; DO NOT USE IT IN THE PRODUCTION CODE
    +    //       because it leads to unnecessary slowdown of your app.
    +    mincgoptguardsmoothness(state);
    +    mincgoptguardgradient(state, 0.001);
     
    -    //
    -    // Optimize and test results.
    -    //
    -    // Optimizer object accepts vector function and its Jacobian, with first
    -    // component (Jacobian row) being target function, and next components
    -    // (Jacobian rows) being nonlinear equality and inequality constraints.
    -    //
    -    // So, our vector function has form
    -    //
    -    //     {f0,f1} = { -x0+x1 , x0^2+x1^2-1 }
    -    //
    -    // with Jacobian
    -    //
    -    //         [  -1    +1  ]
    -    //     J = [            ]
    -    //         [ 2*x0  2*x1 ]
    -    //
    -    // with f0 being target function, f1 being constraining function. Number
    -    // of equality/inequality constraints is specified by minnlcsetnlc(),
    -    // with equality ones always being first, inequality ones being last.
    -    //
    -    alglib::minnlcoptimize(state, nlcfunc1_jac);
    -    minnlcresults(state, x1, rep);
    -    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [1.0000,0.0000]
    +    // first run
    +    alglib::mincgoptimize(state, function1_grad);
    +    mincgresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    +
    +    // second run - algorithm is restarted with mincgrestartfrom()
    +    x = "[10,10]";
    +    mincgrestartfrom(state, x);
    +    alglib::mincgoptimize(state, function1_grad);
    +    mincgresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    +
    +    // check OptGuard integrity report. Why do we need it at all?
    +    // Well, try breaking the gradient by adding 1.0 to some
    +    // of its components - OptGuard should report it as error.
    +    // And it may also catch unintended errors too :)
    +    optguardreport ogrep;
    +    mincgoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
         return 0;
     }
     
     
    -
    +
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -27457,29 +28920,12 @@
     #include "optimization.h"
     
     using namespace alglib;
    -void  nlcfunc2_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +void function1_func(const real_1d_array &x, double &func, void *ptr)
     {
         //
    -    // this callback calculates
    -    //
    -    //     f0(x0,x1,x2) = x0+x1
    -    //     f1(x0,x1,x2) = x2-exp(x0)
    -    //     f2(x0,x1,x2) = x0^2+x1^2-1
    -    //
    -    // and Jacobian matrix J = [dfi/dxj]
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
         //
    -    fi[0] = x[0]+x[1];
    -    fi[1] = x[2]-exp(x[0]);
    -    fi[2] = x[0]*x[0] + x[1]*x[1] - 1.0;
    -    jac[0][0] = 1.0;
    -    jac[0][1] = 1.0;
    -    jac[0][2] = 0.0;
    -    jac[1][0] = -exp(x[0]);
    -    jac[1][1] = 0.0;
    -    jac[1][2] = 1.0;
    -    jac[2][0] = 2*x[0];
    -    jac[2][1] = 2*x[1];
    -    jac[2][2] = 0.0;
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
     }
     
     int main(int argc, char **argv)
    @@ -27487,1070 +28933,1310 @@
         //
         // This example demonstrates minimization of
         //
    -    //     f(x0,x1) = x0+x1
    -    //
    -    // subject to nonlinear inequality constraint
    +    //     f(x,y) = 100*(x+3)^4+(y-3)^4
         //
    -    //    x0^2 + x1^2 - 1 <= 0
    +    // using numerical differentiation to calculate gradient.
         //
    -    // and nonlinear equality constraint
    +    // We also show how to use OptGuard integrity checker to catch common
    +    // problem statement errors like accidentally specifying nonsmooth target
    +    // function.
         //
    -    //    x2-exp(x0) = 0
    +    // First, we set up optimizer...
         //
    -    real_1d_array x0 = "[0,0,0]";
    -    real_1d_array s = "[1,1,1]";
    +    real_1d_array x = "[0,0]";
    +    real_1d_array s = "[1,1]";
         double epsg = 0;
         double epsf = 0;
    -    double epsx = 0.000001;
    +    double epsx = 0.0000000001;
    +    double diffstep = 1.0e-6;
         ae_int_t maxits = 0;
    -    ae_int_t outerits = 5;
    -    ae_int_t updatefreq = 10;
    -    double rho = 1000;
    -    minnlcstate state;
    -    minnlcreport rep;
    -    real_1d_array x1;
    -
    -    //
    -    // Create optimizer object, choose AUL algorithm and tune its settings:
    -    // * rho=1000       penalty coefficient
    -    // * outerits=5     number of outer iterations to tune Lagrange coefficients
    -    // * epsx=0.000001  stopping condition for inner iterations
    -    // * s=[1,1]        all variables have unit scale
    -    // * exact low-rank preconditioner is used, updated after each 10 iterations
    -    //
    -    minnlccreate(3, x0, state);
    -    minnlcsetalgoaul(state, rho, outerits);
    -    minnlcsetcond(state, epsg, epsf, epsx, maxits);
    -    minnlcsetscale(state, s);
    -    minnlcsetprecexactlowrank(state, updatefreq);
    +    mincgstate state;
    +    mincgcreatef(x, diffstep, state);
    +    mincgsetcond(state, epsg, epsf, epsx, maxits);
    +    mincgsetscale(state, s);
     
         //
    -    // Set constraints:
    -    //
    -    // Nonlinear constraints are tricky - you can not "pack" general
    -    // nonlinear function into double precision array. That's why
    -    // minnlcsetnlc() does not accept constraints itself - only constraint
    -    // counts are passed: first parameter is number of equality constraints,
    -    // second one is number of inequality constraints.
    +    // Then, we activate OptGuard integrity checking.
         //
    -    // As for constraining functions - these functions are passed as part
    -    // of problem Jacobian (see below).
    +    // Numerical differentiation always produces "correct" gradient
    +    // (with some truncation error, but unbiased). Thus, we just have
    +    // to check smoothness properties of the target: C0 and C1 continuity.
         //
    -    // NOTE: MinNLC optimizer supports arbitrary combination of boundary, general
    -    //       linear and general nonlinear constraints. This example does not
    -    //       show how to work with boundary or general linear constraints, but you
    -    //       can easily find it in documentation on minnlcsetbc() and
    -    //       minnlcsetlc() functions.
    +    // Sometimes a user accidentally tries to solve nonsmooth problems
    +    // with smooth optimizer. OptGuard helps to detect such situations
    +    // early, at the prototyping stage.
         //
    -    minnlcsetnlc(state, 1, 1);
    +    mincgoptguardsmoothness(state);
     
         //
    -    // Optimize and test results.
    -    //
    -    // Optimizer object accepts vector function and its Jacobian, with first
    -    // component (Jacobian row) being target function, and next components
    -    // (Jacobian rows) being nonlinear equality and inequality constraints.
    -    //
    -    // So, our vector function has form
    -    //
    -    //     {f0,f1,f2} = { x0+x1 , x2-exp(x0) , x0^2+x1^2-1 }
    -    //
    -    // with Jacobian
    +    // Now we are ready to run the optimization
         //
    -    //         [  +1      +1       0 ]
    -    //     J = [-exp(x0)  0        1 ]
    -    //         [ 2*x0    2*x1      0 ]
    +    mincgreport rep;
    +    alglib::mincgoptimize(state, function1_func);
    +    mincgresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    +
         //
    -    // with f0 being target function, f1 being equality constraint "f1=0",
    -    // f2 being inequality constraint "f2<=0". Number of equality/inequality
    -    // constraints is specified by minnlcsetnlc(), with equality ones always
    -    // being first, inequality ones being last.
    +    // ...and to check OptGuard integrity report.
         //
    -    alglib::minnlcoptimize(state, nlcfunc2_jac);
    -    minnlcresults(state, x1, rep);
    -    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [-0.70710,-0.70710,0.49306]
    +    // Want to challenge OptGuard? Try to make your problem
    +    // nonsmooth by replacing 100*(x+3)^4 by 100*|x+3| and
    +    // re-run optimizer.
    +    //
    +    optguardreport ogrep;
    +    mincgoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
         return 0;
     }
     
     
    -
    + - +
     
    /************************************************************************* -This structure stores optimization report: -* IterationsCount total number of inner iterations -* NFEV number of gradient evaluations -* TerminationType termination type (see below) -* CErr maximum violation of all types of constraints -* LCErr maximum violation of linear constraints -* NLCErr maximum violation of nonlinear constraints - -TERMINATION CODES - -TerminationType field contains completion code, which can be: - -8 internal integrity control detected infinite or NAN values in - function/gradient. Abnormal termination signalled. - -3 box constraints are inconsistent - -1 inconsistent parameters were passed: - * penalty parameter for minnssetalgoags() is zero, - but we have nonlinear constraints set by minnssetnlc() - 2 sampling radius decreased below epsx - 5 MaxIts steps was taken - 7 stopping conditions are too stringent, - further improvement is impossible, - X contains best point found so far. - 8 User requested termination via MinNSRequestTermination() -Other fields of this structure are not documented and should not be used! *************************************************************************/ -
    class minnsreport
    +class minasareport
     {
         ae_int_t iterationscount;
         ae_int_t nfev;
    -    double cerr;
    -    double lcerr;
    -    double nlcerr;
         ae_int_t terminationtype;
    -    ae_int_t varidx;
    -    ae_int_t funcidx;
    +    ae_int_t activeconstraints;
     };
    - +
     
    /************************************************************************* -This object stores nonlinear optimizer state. -You should use functions provided by MinNS subpackage to work with this -object + *************************************************************************/ -
    class minnsstate
    +class minasastate
     {
     };
    - -
    -
    /************************************************************************* - NONSMOOTH NONCONVEX OPTIMIZATION - SUBJECT TO BOX/LINEAR/NONLINEAR-NONSMOOTH CONSTRAINTS - -DESCRIPTION: - -The subroutine minimizes function F(x) of N arguments subject to any -combination of: -* bound constraints -* linear inequality constraints -* linear equality constraints -* nonlinear equality constraints Gi(x)=0 -* nonlinear inequality constraints Hi(x)<=0 - -IMPORTANT: see MinNSSetAlgoAGS for important information on performance - restrictions of AGS solver. - -REQUIREMENTS: -* starting point X0 must be feasible or not too far away from the feasible - set -* F(), G(), H() are continuous, locally Lipschitz and continuously (but - not necessarily twice) differentiable in an open dense subset of R^N. - Functions F(), G() and H() may be nonsmooth and non-convex. - Informally speaking, it means that functions are composed of large - differentiable "patches" with nonsmoothness having place only at the - boundaries between these "patches". - Most real-life nonsmooth functions satisfy these requirements. Say, - anything which involves finite number of abs(), min() and max() is very - likely to pass the test. - Say, it is possible to optimize anything of the following: - * f=abs(x0)+2*abs(x1) - * f=max(x0,x1) - * f=sin(max(x0,x1)+abs(x2)) -* for nonlinearly constrained problems: F() must be bounded from below - without nonlinear constraints (this requirement is due to the fact that, - contrary to box and linear constraints, nonlinear ones require special - handling). -* user must provide function value and gradient for F(), H(), G() at all - points where function/gradient can be calculated. If optimizer requires - value exactly at the boundary between "patches" (say, at x=0 for f=abs(x)), - where gradient is not defined, user may resolve tie arbitrarily (in our - case - return +1 or -1 at its discretion). -* NS solver supports numerical differentiation, i.e. it may differentiate - your function for you, but it results in 2N increase of function - evaluations. Not recommended unless you solve really small problems. See - minnscreatef() for more information on this functionality. - -USAGE: - -1. User initializes algorithm state with MinNSCreate() call and chooses - what NLC solver to use. There is some solver which is used by default, - with default settings, but you should NOT rely on default choice. It - may change in future releases of ALGLIB without notice, and no one can - guarantee that new solver will be able to solve your problem with - default settings. - - From the other side, if you choose solver explicitly, you can be pretty - sure that it will work with new ALGLIB releases. - - In the current release following solvers can be used: - * AGS solver (activated with MinNSSetAlgoAGS() function) - -2. User adds boundary and/or linear and/or nonlinear constraints by means - of calling one of the following functions: - a) MinNSSetBC() for boundary constraints - b) MinNSSetLC() for linear constraints - c) MinNSSetNLC() for nonlinear constraints - You may combine (a), (b) and (c) in one optimization problem. - -3. User sets scale of the variables with MinNSSetScale() function. It is - VERY important to set scale of the variables, because nonlinearly - constrained problems are hard to solve when variables are badly scaled. - -4. User sets stopping conditions with MinNSSetCond(). - -5. 
Finally, user calls MinNSOptimize() function which takes algorithm - state and pointer (delegate, etc) to callback function which calculates - F/G/H. - -7. User calls MinNSResults() to get solution - -8. Optionally user may call MinNSRestartFrom() to solve another problem - with same N but another starting point. MinNSRestartFrom() allows to - reuse already initialized structure. - - -INPUT PARAMETERS: - N - problem dimension, N>0: - * if given, only leading N elements of X are used - * if not given, automatically determined from size of X - X - starting point, array[N]: - * it is better to set X to a feasible point - * but X can be infeasible, in which case algorithm will try - to find feasible point first, using X as initial - approximation. - -OUTPUT PARAMETERS: - State - structure stores algorithm state - -NOTE: minnscreatef() function may be used if you do not have analytic - gradient. This function creates solver which uses numerical - differentiation with user-specified step. - - -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::minnscreate(real_1d_array x, minnsstate& state); -void alglib::minnscreate(ae_int_t n, real_1d_array x, minnsstate& state); - -
    -

    Examples:   [1]  [2]  [3]  

    - +
     
    /************************************************************************* -Version of minnscreatef() which uses numerical differentiation. I.e., you -do not have to calculate derivatives yourself. However, this version needs -2N times more function evaluations. - -2-point differentiation formula is used, because more precise 4-point -formula is unstable when used on non-smooth functions. - -INPUT PARAMETERS: - N - problem dimension, N>0: - * if given, only leading N elements of X are used - * if not given, automatically determined from size of X - X - starting point, array[N]: - * it is better to set X to a feasible point - * but X can be infeasible, in which case algorithm will try - to find feasible point first, using X as initial - approximation. - DiffStep- differentiation step, DiffStep>0. Algorithm performs - numerical differentiation with step for I-th variable - being equal to DiffStep*S[I] (here S[] is a scale vector, - set by minnssetscale() function). - Do not use too small steps, because it may lead to - catastrophic cancellation during intermediate calculations. - -OUTPUT PARAMETERS: - State - structure stores algorithm state +Obsolete optimization algorithm. +Was replaced by MinBLEIC subpackage. -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 25.03.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnscreatef( +
    void alglib::minasacreate( real_1d_array x, - double diffstep, - minnsstate& state); -void alglib::minnscreatef( + real_1d_array bndl, + real_1d_array bndu, + minasastate& state, + const xparams _params = alglib::xdefault); +void alglib::minasacreate( ae_int_t n, real_1d_array x, - double diffstep, - minnsstate& state); + real_1d_array bndl, + real_1d_array bndu, + minasastate& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* This family of functions is used to launcn iterations of nonlinear optimizer These functions accept following parameters: state - algorithm state - fvec - callback which calculates function vector fi[] - at given point x - jac - callback which calculates function vector fi[] - and Jacobian jac at given point x + grad - callback which calculates function (or merit function) + value func and gradient grad at given point x rep - optional callback which is called after each iteration can be NULL ptr - optional pointer which is passed to func/grad/hess/jac/rep can be NULL -NOTES: - -1. This function has two different implementations: one which uses exact - (analytical) user-supplied Jacobian, and one which uses only function - vector and numerically differentiates function in order to obtain - gradient. - - Depending on the specific function used to create optimizer object - you should choose appropriate variant of minnsoptimize() - one which - accepts function AND Jacobian or one which accepts ONLY function. - - Be careful to choose variant of minnsoptimize() which corresponds to - your optimization scheme! Table below lists different combinations of - callback (function/gradient) passed to minnsoptimize() and specific - function used to create optimizer. - - - | USER PASSED TO minnsoptimize() - CREATED WITH | function only | function and gradient - ------------------------------------------------------------ - minnscreatef() | works FAILS - minnscreate() | FAILS works - - Here "FAILS" denotes inappropriate combinations of optimizer creation - function and minnsoptimize() version. Attemps to use such - combination will lead to exception. Either you did not pass gradient - when it WAS needed or you passed gradient when it was NOT needed. - -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -
    void minnsoptimize(minnsstate &state, - void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), - void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); -void minnsoptimize(minnsstate &state, - void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), +
    void minasaoptimize(minasastate &state, + void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); + void *ptr = NULL, + const xparams _xparams = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +
     
    /************************************************************************* -This subroutine submits request for termination of running optimizer. It -should be called from user-supplied callback when user decides that it is -time to "smoothly" terminate optimization process. As result, optimizer -stops at point which was "current accepted" when termination request was -submitted and returns error code 8 (successful termination). - -INPUT PARAMETERS: - State - optimizer structure - -NOTE: after request for termination optimizer may perform several - additional calls to user-supplied callbacks. It does NOT guarantee - to stop immediately - it just guarantees that these additional calls - will be discarded later. - -NOTE: calling this function on optimizer which is NOT running will have no - effect. - -NOTE: multiple calls to this function are possible. First call is counted, - subsequent calls are silently ignored. +Obsolete optimization algorithm. +Was replaced by MinBLEIC subpackage. -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnsrequesttermination(minnsstate state); +
    void alglib::minasarestartfrom( + minasastate state, + real_1d_array x, + real_1d_array bndl, + real_1d_array bndu, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine restarts algorithm from new point. -All optimization parameters (including constraints) are left unchanged. - -This function allows to solve multiple optimization problems (which -must have same number of dimensions) without object reallocation penalty. - -INPUT PARAMETERS: - State - structure previously allocated with minnscreate() call. - X - new starting point. +Obsolete optimization algorithm. +Was replaced by MinBLEIC subpackage. -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnsrestartfrom(minnsstate state, real_1d_array x); +
    void alglib::minasaresults( + minasastate state, + real_1d_array& x, + minasareport& rep, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -MinNS results - -INPUT PARAMETERS: - State - algorithm state - -OUTPUT PARAMETERS: - X - array[0..N-1], solution - Rep - optimization report. You should check Rep.TerminationType - in order to distinguish successful termination from - unsuccessful one: - * -8 internal integrity control detected infinite or - NAN values in function/gradient. Abnormal - termination signalled. - * -3 box constraints are inconsistent - * -1 inconsistent parameters were passed: - * penalty parameter for minnssetalgoags() is zero, - but we have nonlinear constraints set by minnssetnlc() - * 2 sampling radius decreased below epsx - * 7 stopping conditions are too stringent, - further improvement is impossible, - X contains best point found so far. - * 8 User requested termination via minnsrequesttermination() +Obsolete optimization algorithm. +Was replaced by MinBLEIC subpackage. -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnsresults( - minnsstate state, +
    void alglib::minasaresultsbuf( + minasastate state, real_1d_array& x, - minnsreport& rep); + minasareport& rep, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +
     
    /************************************************************************* - -Buffered implementation of minnsresults() which uses pre-allocated buffer -to store X[]. If buffer size is too small, it resizes buffer. It is -intended to be used in the inner cycles of performance critical algorithms -where array reallocation penalty is too large to be ignored. +Obsolete optimization algorithm. +Was replaced by MinBLEIC subpackage. -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnsresultsbuf( - minnsstate state, - real_1d_array& x, - minnsreport& rep); +
    void alglib::minasasetalgorithm( + minasastate state, + ae_int_t algotype, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function tells MinNS unit to use AGS (adaptive gradient sampling) -algorithm for nonsmooth constrained optimization. This algorithm is a -slight modification of one described in "An Adaptive Gradient Sampling -Algorithm for Nonsmooth Optimization" by Frank E. Curtisy and Xiaocun Quez. - -This optimizer has following benefits and drawbacks: -+ robustness; it can be used with nonsmooth and nonconvex functions. -+ relatively easy tuning; most of the metaparameters are easy to select. -- it has convergence of steepest descent, slower than CG/LBFGS. -- each iteration involves evaluation of ~2N gradient values and solution - of 2Nx2N quadratic programming problem, which limits applicability of - algorithm by small-scale problems (up to 50-100). - -IMPORTANT: this algorithm has convergence guarantees, i.e. it will - steadily move towards some stationary point of the function. - - However, "stationary point" does not always mean "solution". - Nonsmooth problems often have "flat spots", i.e. areas where - function do not change at all. Such "flat spots" are stationary - points by definition, and algorithm may be caught here. - - Nonsmooth CONVEX tasks are not prone to this problem. Say, if - your function has form f()=MAX(f0,f1,...), and f_i are convex, - then f() is convex too and you have guaranteed convergence to - solution. - -INPUT PARAMETERS: - State - structure which stores algorithm state - Radius - initial sampling radius, >=0. - - Internally multiplied by vector of per-variable scales - specified by minnssetscale()). - - You should select relatively large sampling radius, roughly - proportional to scaled length of the first steps of the - algorithm. Something close to 0.1 in magnitude should be - good for most problems. - - AGS solver can automatically decrease radius, so too large - radius is not a problem (assuming that you won't choose - so large radius that algorithm will sample function in - too far away points, where gradient value is irrelevant). - - Too small radius won't cause algorithm to fail, but it may - slow down algorithm (it may have to perform too short - steps). - Penalty - penalty coefficient for nonlinear constraints: - * for problem with nonlinear constraints should be some - problem-specific positive value, large enough that - penalty term changes shape of the function. - Starting from some problem-specific value penalty - coefficient becomes large enough to exactly enforce - nonlinear constraints; larger values do not improve - precision. - Increasing it too much may slow down convergence, so you - should choose it carefully. - * can be zero for problems WITHOUT nonlinear constraints - (i.e. for unconstrained ones or ones with just box or - linear constraints) - * if you specify zero value for problem with at least one - nonlinear constraint, algorithm will terminate with - error code -1. - -ALGORITHM OUTLINE - -The very basic outline of unconstrained AGS algorithm is given below: - -0. If sampling radius is below EpsX or we performed more then MaxIts - iterations - STOP. -1. sample O(N) gradient values at random locations around current point; - informally speaking, this sample is an implicit piecewise linear model - of the function, although algorithm formulation does not mention that - explicitly -2. solve quadratic programming problem in order to find descent direction -3. if QP solver tells us that we are near solution, decrease sampling - radius and move to (0) -4. 
perform backtracking line search -5. after moving to new point, goto (0) - -As for the constraints: -* box constraints are handled exactly by modification of the function - being minimized -* linear/nonlinear constraints are handled by adding L1 penalty. Because - our solver can handle nonsmoothness, we can use L1 penalty function, - which is an exact one (i.e. exact solution is returned under such - penalty). -* penalty coefficient for linear constraints is chosen automatically; - however, penalty coefficient for nonlinear constraints must be specified - by user. +Obsolete optimization algorithm. +Was replaced by MinBLEIC subpackage. -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnssetalgoags( - minnsstate state, - double radius, - double penalty); +
    void alglib::minasasetcond( + minasastate state, + double epsg, + double epsf, + double epsx, + ae_int_t maxits, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +
     
    /************************************************************************* -This function sets boundary constraints. +Obsolete optimization algorithm. +Was replaced by MinBLEIC subpackage. -Boundary constraints are inactive by default (after initial creation). -They are preserved after algorithm restart with minnsrestartfrom(). + -- ALGLIB -- + Copyright 02.04.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minasasetstpmax( + minasastate state, + double stpmax, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS: - State - structure stores algorithm state - BndL - lower bounds, array[N]. - If some (all) variables are unbounded, you may specify - very small number or -INF. - BndU - upper bounds, array[N]. - If some (all) variables are unbounded, you may specify - very large number or +INF. +
    + +
    +
    /************************************************************************* +Obsolete optimization algorithm. +Was replaced by MinBLEIC subpackage. -NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th -variable will be "frozen" at X[i]=BndL[i]=BndU[i]. + -- ALGLIB -- + Copyright 02.04.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minasasetxrep( + minasastate state, + bool needxrep, + const xparams _params = alglib::xdefault); -NOTE 2: AGS solver has following useful properties: -* bound constraints are always satisfied exactly -* function is evaluated only INSIDE area specified by bound constraints, - even when numerical differentiation is used (algorithm adjusts nodes - according to boundary constraints) +
    + +
    +
    /************************************************************************* +This is obsolete function which was used by previous version of the BLEIC +optimizer. It does nothing in the current version of BLEIC. -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 28.11.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnssetbc( - minnsstate state, - real_1d_array bndl, - real_1d_array bndu); +
    void alglib::minbleicsetbarrierdecay( + minbleicstate state, + double mudecay, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function sets stopping conditions for iterations of optimizer. +This is obsolete function which was used by previous version of the BLEIC +optimizer. It does nothing in the current version of BLEIC. -INPUT PARAMETERS: - State - structure which stores algorithm state - EpsX - >=0 - The AGS solver finishes its work if on k+1-th iteration - sampling radius decreases below EpsX. - MaxIts - maximum number of iterations. If MaxIts=0, the number of - iterations is unlimited. + -- ALGLIB -- + Copyright 28.11.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minbleicsetbarrierwidth( + minbleicstate state, + double mu, + const xparams _params = alglib::xdefault); -Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic -stopping criterion selection. We do not recommend you to rely on default -choice in production code. +
    + +
    +
    /************************************************************************* +Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead. -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 13.10.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnssetcond(minnsstate state, double epsx, ae_int_t maxits); +
    void alglib::minlbfgssetcholeskypreconditioner( + minlbfgsstate state, + real_2d_array p, + bool isupper, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +
     
    /************************************************************************* -This function sets linear constraints. +Obsolete function, use MinLBFGSSetPrecDefault() instead. -Linear constraints are inactive by default (after initial creation). -They are preserved after algorithm restart with minnsrestartfrom(). + -- ALGLIB -- + Copyright 13.10.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgssetdefaultpreconditioner( + minlbfgsstate state, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS: - State - structure previously allocated with minnscreate() call. - C - linear constraints, array[K,N+1]. - Each row of C represents one constraint, either equality - or inequality (see below): - * first N elements correspond to coefficients, - * last element corresponds to the right part. - All elements of C (including right part) must be finite. - CT - type of constraints, array[K]: - * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] - * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] - * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] - K - number of equality/inequality constraints, K>=0: - * if given, only leading K elements of C/CT are used - * if not given, automatically determined from sizes of C/CT +
    + + + +
    +
    /************************************************************************* +This structure stores optimization report: +* IterationsCount total number of inner iterations +* NFEV number of gradient evaluations +* TerminationType termination type (see below) -NOTE: linear (non-bound) constraints are satisfied only approximately: +TERMINATION CODES -* there always exists some minor violation (about current sampling radius - in magnitude during optimization, about EpsX in the solution) due to use - of penalty method to handle constraints. -* numerical differentiation, if used, may lead to function evaluations - outside of the feasible area, because algorithm does NOT change - numerical differentiation formula according to linear constraints. +TerminationType field contains completion code, which can be: + -8 internal integrity control detected infinite or NAN values in + function/gradient. Abnormal termination signalled. + 1 relative function improvement is no more than EpsF. + 2 relative step is no more than EpsX. + 4 gradient norm is no more than EpsG + 5 MaxIts steps was taken + 7 stopping conditions are too stringent, + further improvement is impossible, + X contains best point found so far. + 8 terminated by user who called minlbfgsrequesttermination(). + X contains point which was "current accepted" when termination + request was submitted. -If you want constraints to be satisfied exactly, try to reformulate your -problem in such manner that all constraints will become boundary ones -(this kind of constraints is always satisfied exactly, both in the final -solution and in all intermediate points). +Other fields of this structure are not documented and should not be used! +*************************************************************************/ +
    class minlbfgsreport +{ + ae_int_t iterationscount; + ae_int_t nfev; + ae_int_t terminationtype; +}; + +
    + +
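A report like this is usually reduced to a check of TerminationType once minlbfgsresults() has been called. Below is a minimal sketch that uses only the fields documented above (iterationscount, nfev, terminationtype); the helper name print_lbfgs_outcome is illustrative.

#include <stdio.h>
#include "optimization.h"

// Summarizes an L-BFGS run; the code-to-text mapping follows the table above.
void print_lbfgs_outcome(const alglib::minlbfgsreport &rep)
{
    printf("iterations: %d, gradient evaluations: %d\n",
           (int)rep.iterationscount, (int)rep.nfev);
    switch( rep.terminationtype )
    {
    case -8: printf("infinite or NAN values detected\n");       break;
    case  1: printf("converged: EpsF criterion satisfied\n");   break;
    case  2: printf("converged: EpsX criterion satisfied\n");   break;
    case  4: printf("converged: EpsG criterion satisfied\n");   break;
    case  5: printf("stopped: MaxIts steps taken\n");            break;
    case  7: printf("stopping conditions too stringent\n");      break;
    case  8: printf("terminated by user request\n");             break;
    default: printf("termination code %d\n", (int)rep.terminationtype);
    }
}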
    +
    /************************************************************************* - -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnssetlc( - minnsstate state, - real_2d_array c, - integer_1d_array ct); -void alglib::minnssetlc( - minnsstate state, - real_2d_array c, - integer_1d_array ct, - ae_int_t k); +
    class minlbfgsstate +{ +};
    - +
     
    /************************************************************************* -This function sets nonlinear constraints. + LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION -In fact, this function sets NUMBER of nonlinear constraints. Constraints -itself (constraint functions) are passed to minnsoptimize() method. This -method requires user-defined vector function F[] and its Jacobian J[], -where: -* first component of F[] and first row of Jacobian J[] correspond to - function being minimized -* next NLEC components of F[] (and rows of J) correspond to nonlinear - equality constraints G_i(x)=0 -* next NLIC components of F[] (and rows of J) correspond to nonlinear - inequality constraints H_i(x)<=0 +DESCRIPTION: +The subroutine minimizes function F(x) of N arguments by using a quasi- +Newton method (LBFGS scheme) which is optimized to use a minimum amount +of memory. +The subroutine generates the approximation of an inverse Hessian matrix by +using information about the last M steps of the algorithm (instead of N). +It lessens a required amount of memory from a value of order N^2 to a +value of order 2*N*M. -NOTE: you may combine nonlinear constraints with linear/boundary ones. If - your problem has mixed constraints, you may explicitly specify some - of them as linear ones. It may help optimizer to handle them more - efficiently. -INPUT PARAMETERS: - State - structure previously allocated with minnscreate() call. - NLEC - number of Non-Linear Equality Constraints (NLEC), >=0 - NLIC - number of Non-Linear Inquality Constraints (NLIC), >=0 +REQUIREMENTS: +Algorithm will request following information during its operation: +* function value F and its gradient G (simultaneously) at given point X -NOTE 1: nonlinear constraints are satisfied only approximately! It is - possible that algorithm will evaluate function outside of - the feasible area! -NOTE 2: algorithm scales variables according to scale specified by - minnssetscale() function, so it can handle problems with badly - scaled variables (as long as we KNOW their scales). +USAGE: +1. User initializes algorithm state with MinLBFGSCreate() call +2. User tunes solver parameters with MinLBFGSSetCond() MinLBFGSSetStpMax() + and other functions +3. User calls MinLBFGSOptimize() function which takes algorithm state and + pointer (delegate, etc.) to callback function which calculates F/G. +4. User calls MinLBFGSResults() to get solution +5. Optionally user may call MinLBFGSRestartFrom() to solve another problem + with same N/M but another starting point and/or another function. + MinLBFGSRestartFrom() allows to reuse already initialized structure. - However, there is no way to automatically scale nonlinear - constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may - ruin convergence. Solving problem with constraint "1000*G0(x)=0" - is NOT same as solving it with constraint "0.001*G0(x)=0". - It means that YOU are the one who is responsible for correct - scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you - to scale nonlinear constraints in such way that I-th component of - dG/dX (or dH/dx) has approximately unit magnitude (for problems - with unit scale) or has magnitude approximately equal to 1/S[i] - (where S is a scale set by minnssetscale() function). +INPUT PARAMETERS: + N - problem dimension. N>0 + M - number of corrections in the BFGS scheme of Hessian + approximation update. Recommended value: 3<=M<=7. 
The smaller + value causes worse convergence, the bigger will not cause a + considerably better convergence, but will cause a fall in the + performance. M<=N. + X - initial solution approximation, array[0..N-1]. -NOTE 3: nonlinear constraints are always hard to handle, no matter what - algorithm you try to use. Even basic box/linear constraints modify - function curvature by adding valleys and ridges. However, - nonlinear constraints add valleys which are very hard to follow - due to their "curved" nature. - It means that optimization with single nonlinear constraint may be - significantly slower than optimization with multiple linear ones. - It is normal situation, and we recommend you to carefully choose - Rho parameter of minnssetalgoags(), because too large value may - slow down convergence. +OUTPUT PARAMETERS: + State - structure which stores algorithm state + + +NOTES: +1. you may tune stopping conditions with MinLBFGSSetCond() function +2. if target function contains exp() or other fast growing functions, and + optimization algorithm makes too large steps which leads to overflow, + use MinLBFGSSetStpMax() function to bound algorithm's steps. However, + L-BFGS rarely needs such a tuning. -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 02.04.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnssetnlc(minnsstate state, ae_int_t nlec, ae_int_t nlic); +
    void alglib::minlbfgscreate( + ae_int_t m, + real_1d_array x, + minlbfgsstate& state, + const xparams _params = alglib::xdefault); +void alglib::minlbfgscreate( + ae_int_t n, + ae_int_t m, + real_1d_array x, + minlbfgsstate& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  [2]  

    +
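The USAGE steps above condense into one small program. The sketch below is illustrative rather than part of the manual: the target function, the tolerances and the name demo_grad are arbitrary choices, and the minlbfgssetcond()/minlbfgsresults() signatures are assumed to follow the pattern of the analogous functions shown elsewhere in this document.

#include <stdio.h>
#include <math.h>
#include "optimization.h"

using namespace alglib;

// f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4, with analytic gradient
void demo_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    grad[0] = 400*pow(x[0]+3,3);
    grad[1] = 4*pow(x[1]-3,3);
}

int main(int argc, char **argv)
{
    real_1d_array x = "[0,0]";
    double epsg = 0.0000000001;
    ae_int_t maxits = 0;
    minlbfgsstate state;
    minlbfgsreport rep;

    // USAGE steps 1-4: create, tune stopping conditions, optimize, read results
    minlbfgscreate(1, x, state);                    // M=1 correction is enough for N=2
    minlbfgssetcond(state, epsg, 0, 0, maxits);
    alglib::minlbfgsoptimize(state, demo_grad);
    minlbfgsresults(state, x, rep);

    printf("%s\n", x.tostring(2).c_str());          // should approach [-3,3]
    return 0;
}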
     
    /************************************************************************* -This function sets scaling coefficients for NLC optimizer. - -ALGLIB optimizers use scaling matrices to test stopping conditions (step -size and gradient are scaled before comparison with tolerances). Scale of -the I-th variable is a translation invariant measure of: -a) "how large" the variable is -b) how large the step should be to make significant changes in the function +The subroutine is finite difference variant of MinLBFGSCreate(). It uses +finite differences in order to differentiate target function. -Scaling is also used by finite difference variant of the optimizer - step -along I-th axis is equal to DiffStep*S[I]. +Description below contains information which is specific to this function +only. We recommend to read comments on MinLBFGSCreate() in order to get +more information about creation of LBFGS optimizer. INPUT PARAMETERS: - State - structure stores algorithm state - S - array[N], non-zero scaling coefficients - S[i] may be negative, sign doesn't matter. + N - problem dimension, N>0: + * if given, only leading N elements of X are used + * if not given, automatically determined from size of X + M - number of corrections in the BFGS scheme of Hessian + approximation update. Recommended value: 3<=M<=7. The smaller + value causes worse convergence, the bigger will not cause a + considerably better convergence, but will cause a fall in the + performance. M<=N. + X - starting point, array[0..N-1]. + DiffStep- differentiation step, >0 + +OUTPUT PARAMETERS: + State - structure which stores algorithm state + +NOTES: +1. algorithm uses 4-point central formula for differentiation. +2. differentiation step along I-th axis is equal to DiffStep*S[I] where + S[] is scaling vector which can be set by MinLBFGSSetScale() call. +3. we recommend you to use moderate values of differentiation step. Too + large step will result in too large truncation errors, while too small + step will result in too large numerical errors. 1.0E-6 can be good + value to start with. +4. Numerical differentiation is very inefficient - one gradient + calculation needs 4*N function evaluations. This function will work for + any N - either small (1...10), moderate (10...100) or large (100...). + However, performance penalty will be too severe for any N's except for + small ones. + We should also say that code which relies on numerical differentiation + is less robust and precise. LBFGS needs exact gradient values. + Imprecise gradient may slow down convergence, especially on highly + nonlinear problems. + Thus we recommend to use this function for fast prototyping on small- + dimensional problems only, and to implement analytical gradient as soon + as possible. -- ALGLIB -- - Copyright 18.05.2015 by Bochkanov Sergey + Copyright 16.05.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnssetscale(minnsstate state, real_1d_array s); +
    void alglib::minlbfgscreatef( + ae_int_t m, + real_1d_array x, + double diffstep, + minlbfgsstate& state, + const xparams _params = alglib::xdefault); +void alglib::minlbfgscreatef( + ae_int_t n, + ae_int_t m, + real_1d_array x, + double diffstep, + minlbfgsstate& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +

    Examples:   [1]  

    +
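A minimal sketch of the finite-difference workflow described above: only function values are coded by the user and the gradient is approximated internally with the 4-point formula mentioned in the notes. The target function, the DiffStep value and the name demo_func are illustrative; minlbfgssetcond()/minlbfgsresults() signatures are assumed as in the previous sketch.

#include <stdio.h>
#include <math.h>
#include "optimization.h"

using namespace alglib;

// Function value only; no analytic gradient is supplied
void demo_func(const real_1d_array &x, double &func, void *ptr)
{
    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
}

int main(int argc, char **argv)
{
    real_1d_array x = "[0,0]";
    double diffstep = 0.000001;     // moderate step, as recommended above
    double epsg = 0.0000000001;
    ae_int_t maxits = 0;
    minlbfgsstate state;
    minlbfgsreport rep;

    minlbfgscreatef(1, x, diffstep, state);
    minlbfgssetcond(state, epsg, 0, 0, maxits);
    alglib::minlbfgsoptimize(state, demo_func);     // func-only variant of the optimizer loop
    minlbfgsresults(state, x, rep);
    printf("%s\n", x.tostring(2).c_str());
    return 0;
}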
     
    /************************************************************************* -This function turns on/off reporting. +This function activates/deactivates verification of the user-supplied +analytic gradient. + +Upon activation of this option OptGuard integrity checker performs +numerical differentiation of your target function at the initial point +(note: future versions may also perform check at the final point) and +compares numerical gradient with analytic one provided by you. + +If difference is too large, an error flag is set and optimization session +continues. After optimization session is over, you can retrieve the report +which stores both gradients and specific components highlighted as +suspicious by the OptGuard. + +The primary OptGuard report can be retrieved with minlbfgsoptguardresults(). + +IMPORTANT: gradient check is a high-overhead option which will cost you + about 3*N additional function evaluations. In many cases it may + cost as much as the rest of the optimization session. + + YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO + CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. + +NOTE: unlike previous incarnation of the gradient checking code, OptGuard + does NOT interrupt optimization even if it discovers bad gradient. INPUT PARAMETERS: - State - structure which stores algorithm state - NeedXRep- whether iteration reports are needed or not + State - structure used to store algorithm state + TestStep - verification step used for numerical differentiation: + * TestStep=0 turns verification off + * TestStep>0 activates verification + You should carefully choose TestStep. Value which is + too large (so large that function behavior is non- + cubic at this scale) will lead to false alarms. Too + short step will result in rounding errors dominating + numerical derivative. -If NeedXRep is True, algorithm will call rep() callback function if it is -provided to minnsoptimize(). + You may use different step for different parameters by + means of setting scale with minlbfgssetscale(). + +=== EXPLANATION ========================================================== + +In order to verify gradient algorithm performs following steps: + * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], + where X[i] is i-th component of the initial point and S[i] is a scale + of i-th parameter + * F(X) is evaluated at these trial points + * we perform one more evaluation in the middle point of the interval + * we build cubic model using function values and derivatives at trial + points and we compare its prediction with actual value in the middle + point -- ALGLIB -- - Copyright 28.11.2010 by Bochkanov Sergey + Copyright 15.06.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minnssetxrep(minnsstate state, bool needxrep); +
    void alglib::minlbfgsoptguardgradient( + minlbfgsstate state, + double teststep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
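One way to exercise the verification described above is to pass a deliberately broken analytic gradient and read the OptGuard report afterwards. The sketch below is illustrative: broken_grad and the TestStep value are arbitrary, and the badgrad* fields are the ones listed in the minlbfgsoptguardresults() section further down.

#include <stdio.h>
#include <math.h>
#include "optimization.h"

using namespace alglib;

// The second gradient component is intentionally wrong (missing factor of 4)
void broken_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    grad[0] = 400*pow(x[0]+3,3);
    grad[1] = pow(x[1]-3,3);        // should be 4*pow(x[1]-3,3)
}

int main(int argc, char **argv)
{
    real_1d_array x = "[0,0]";
    minlbfgsstate state;
    minlbfgsreport rep;
    optguardreport ogrep;

    minlbfgscreate(1, x, state);
    minlbfgssetcond(state, 0.0000000001, 0, 0, 0);
    minlbfgsoptguardgradient(state, 0.001);         // TestStep>0 activates verification
    alglib::minlbfgsoptimize(state, broken_grad);
    minlbfgsresults(state, x, rep);

    minlbfgsoptguardresults(state, ogrep);
    printf("%s\n", ogrep.badgradsuspected ? "true" : "false");  // expected to be flagged
    if( ogrep.badgradsuspected )
        printf("suspicious gradient component: %d\n", (int)ogrep.badgradvidx);
    return 0;
}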
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +
    /************************************************************************* +Detailed results of the OptGuard integrity check for nonsmoothness test #0 + +Nonsmoothness (non-C1) test #0 studies function values (not gradient!) +obtained during line searches and monitors behavior of the directional +derivative estimate. + +This test is less powerful than test #1, but it does not depend on the +gradient values and thus it is more robust against artifacts introduced by +numerical differentiation. + +Two reports are returned: +* a "strongest" one, corresponding to line search which had highest + value of the nonsmoothness indicator +* a "longest" one, corresponding to line search which had more function + evaluations, and thus is more detailed + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], f[] - arrays of length CNT which store step lengths and function + values at these points; f[i] is evaluated in x0+stp[i]*d. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +========================================================================== += SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will += see where C1 continuity is violated. +========================================================================== -using namespace alglib; -void nsfunc1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr) -{ - // - // this callback calculates - // - // f0(x0,x1) = 2*|x0|+x1 - // - // and Jacobian matrix J = [df0/dx0 df0/dx1] - // - fi[0] = 2*fabs(double(x[0]))+fabs(double(x[1])); - jac[0][0] = 2*alglib::sign(x[0]); - jac[0][1] = alglib::sign(x[1]); -} +INPUT PARAMETERS: + state - algorithm state -int main(int argc, char **argv) -{ - // - // This example demonstrates minimization of - // - // f(x0,x1) = 2*|x0|+|x1| - // - // subject to box constraints - // - // 1 <= x0 < +INF - // -INF <= x1 < +INF - // - // using nonsmooth nonlinear optimizer. - // - real_1d_array x0 = "[1,1]"; - real_1d_array s = "[1,1]"; - real_1d_array bndl = "[1,-inf]"; - real_1d_array bndu = "[+inf,+inf]"; - double epsx = 0.00001; - double radius = 0.1; - double rho = 0.0; - ae_int_t maxits = 0; - minnsstate state; - minnsreport rep; - real_1d_array x1; +OUTPUT PARAMETERS: + strrep - C1 test #0 "strong" report + lngrep - C1 test #0 "long" report - // - // Create optimizer object, choose AGS algorithm and tune its settings: - // * radius=0.1 good initial value; will be automatically decreased later. - // * rho=0.0 penalty coefficient for nonlinear constraints; can be zero - // because we do not have such constraints - // * epsx=0.000001 stopping conditions - // * s=[1,1] all variables have unit scale - // - minnscreate(2, x0, state); - minnssetalgoags(state, radius, rho); - minnssetcond(state, epsx, maxits); - minnssetscale(state, s); + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgsoptguardnonc1test0results( + minlbfgsstate state, + optguardnonc1test0report& strrep, + optguardnonc1test0report& lngrep, + const xparams _params = alglib::xdefault); + +
    + +
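A short sketch of how the two reports might be inspected once the optimization session is over; it uses only the documented fields (positive, stpidxa, stpidxb), and the helper name inspect_c1_test0 is illustrative.

#include <stdio.h>
#include "optimization.h"

// Retrieves and summarizes the C1 test #0 reports for a finished session
void inspect_c1_test0(alglib::minlbfgsstate &state)
{
    alglib::optguardnonc1test0report strrep, lngrep;   // "strong" and "long" variants
    alglib::minlbfgsoptguardnonc1test0results(state, strrep, lngrep);
    if( strrep.positive )
        printf("C1 violation suspected between steps %d and %d\n",
               (int)strrep.stpidxa, (int)strrep.stpidxb);
    else
        printf("test #0 found nothing suspicious\n");
    // lngrep holds the longer line-search log and can be plotted as (stp,f)
}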
    +
    /************************************************************************* +Detailed results of the OptGuard integrity check for nonsmoothness test #1 + +Nonsmoothness (non-C1) test #1 studies individual components of the +gradient computed during line search. + +When precise analytic gradient is provided this test is more powerful than +test #0 which works with function values and ignores user-provided +gradient. However, test #0 becomes more powerful when numerical +differentiation is employed (in such cases test #1 detects higher levels +of numerical noise and becomes too conservative). + +This test also tells specific components of the gradient which violate C1 +continuity, which makes it more informative than #0, which just tells that +continuity is violated. + +Two reports are returned: +* a "strongest" one, corresponding to line search which had highest + value of the nonsmoothness indicator +* a "longest" one, corresponding to line search which had more function + evaluations, and thus is more detailed + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* vidx - is an index of the variable in [0,N) with nonsmooth derivative +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], g[] - arrays of length CNT which store step lengths and gradient + values at these points; g[i] is evaluated in x0+stp[i]*d and contains + vidx-th component of the gradient. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +========================================================================== += SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will += see where C1 continuity is violated. +========================================================================== - // - // Set box constraints. - // - // General linear constraints are set in similar way (see comments on - // minnssetlc() function for more information). - // - // You may combine box, linear and nonlinear constraints in one optimization - // problem. - // - minnssetbc(state, bndl, bndu); +INPUT PARAMETERS: + state - algorithm state - // - // Optimize and test results. - // - // Optimizer object accepts vector function and its Jacobian, with first - // component (Jacobian row) being target function, and next components - // (Jacobian rows) being nonlinear equality and inequality constraints - // (box/linear ones are passed separately by means of minnssetbc() and - // minnssetlc() calls). - // - // If you do not have nonlinear constraints (exactly our situation), then - // you will have one-component function vector and 1xN Jacobian matrix. - // - // So, our vector function has form - // - // {f0} = { 2*|x0|+|x1| } - // - // with Jacobian - // - // [ ] - // J = [ 2*sign(x0) sign(x1) ] - // [ ] - // - // NOTE: nonsmooth optimizer requires considerably more function - // evaluations than smooth solver - about 2N times more. Using - // numerical differentiation introduces additional (multiplicative) - // 2N speedup. 
- // - // It means that if smooth optimizer WITH user-supplied gradient - // needs 100 function evaluations to solve 50-dimensional problem, - // then AGS solver with user-supplied gradient will need about 10.000 - // function evaluations, and with numerical gradient about 1.000.000 - // function evaluations will be performed. - // - // NOTE: AGS solver used by us can handle nonsmooth and nonconvex - // optimization problems. It has convergence guarantees, i.e. it will - // converge to stationary point of the function after running for some - // time. - // - // However, it is important to remember that "stationary point" is not - // equal to "solution". If your problem is convex, everything is OK. - // But nonconvex optimization problems may have "flat spots" - large - // areas where gradient is exactly zero, but function value is far away - // from optimal. Such areas are stationary points too, and optimizer - // may be trapped here. - // - // "Flat spots" are nonsmooth equivalent of the saddle points, but with - // orders of magnitude worse properties - they may be quite large and - // hard to avoid. All nonsmooth optimizers are prone to this kind of the - // problem, because it is impossible to automatically distinguish "flat - // spot" from true solution. - // - // This note is here to warn you that you should be very careful when - // you solve nonsmooth optimization problems. Visual inspection of - // results is essential. - // - // - alglib::minnsoptimize(state, nsfunc1_jac); - minnsresults(state, x1, rep); - printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [1.0000,0.0000] - return 0; -} +OUTPUT PARAMETERS: + strrep - C1 test #1 "strong" report + lngrep - C1 test #1 "long" report + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgsoptguardnonc1test1results( + minlbfgsstate state, + optguardnonc1test1report& strrep, + optguardnonc1test1report& lngrep, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Results of OptGuard integrity check, should be called after optimization +session is over. + +=== PRIMARY REPORT ======================================================= + +OptGuard performs several checks which are intended to catch common errors +in the implementation of nonlinear function/gradient: +* incorrect analytic gradient +* discontinuous (non-C0) target functions (constraints) +* nonsmooth (non-C1) target functions (constraints) + +Each of these checks is activated with appropriate function: +* minlbfgsoptguardgradient() for gradient verification +* minlbfgsoptguardsmoothness() for C0/C1 checks + +Following flags are set when these errors are suspected: +* rep.badgradsuspected, and additionally: + * rep.badgradvidx for specific variable (gradient element) suspected + * rep.badgradxbase, a point where gradient is tested + * rep.badgraduser, user-provided gradient (stored as 2D matrix with + single row in order to make report structure compatible with more + complex optimizers like MinNLC or MinLM) + * rep.badgradnum, reference gradient obtained via numerical + differentiation (stored as 2D matrix with single row in order to make + report structure compatible with more complex optimizers like MinNLC + or MinLM) +* rep.nonc0suspected +* rep.nonc1suspected + +=== ADDITIONAL REPORTS/LOGS ============================================== + +Several different tests are performed to catch C0/C1 errors, you can find +out specific test signaled error by looking to: +* rep.nonc0test0positive, for non-C0 test #0 +* rep.nonc1test0positive, for non-C1 test #0 +* rep.nonc1test1positive, for non-C1 test #1 + +Additional information (including line search logs) can be obtained by +means of: +* minlbfgsoptguardnonc1test0results() +* minlbfgsoptguardnonc1test1results() +which return detailed error reports, specific points where discontinuities +were found, and so on. -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +==========================================================================
     
    -using namespace alglib;
    -void  nsfunc1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    -{
    -    //
    -    // this callback calculates
    -    //
    -    //     f0(x0,x1) = 2*|x0|+x1
    -    //
    -    fi[0] = 2*fabs(double(x[0]))+fabs(double(x[1]));
    -}
    +INPUT PARAMETERS:
    +    state   -   algorithm state
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates minimization of
    -    //
    -    //     f(x0,x1) = 2*|x0|+|x1|
    -    //
    -    // using nonsmooth nonlinear optimizer with numerical
    -    // differentiation provided by ALGLIB.
    -    //
    -    // NOTE: nonsmooth optimizer requires considerably more function
    -    //       evaluations than smooth solver - about 2N times more. Using
    -    //       numerical differentiation introduces additional (multiplicative)
    -    //       2N speedup.
    -    //
    -    //       It means that if smooth optimizer WITH user-supplied gradient
    -    //       needs 100 function evaluations to solve 50-dimensional problem,
    -    //       then AGS solver with user-supplied gradient will need about 10.000
    -    //       function evaluations, and with numerical gradient about 1.000.000
    -    //       function evaluations will be performed.
    -    //
    -    real_1d_array x0 = "[1,1]";
    -    real_1d_array s = "[1,1]";
    -    double epsx = 0.00001;
    -    double diffstep = 0.000001;
    -    double radius = 0.1;
    -    double rho = 0.0;
    -    ae_int_t maxits = 0;
    -    minnsstate state;
    -    minnsreport rep;
    -    real_1d_array x1;
    +OUTPUT PARAMETERS:
    +    rep     -   generic OptGuard report;  more  detailed  reports  can  be
    +                retrieved with other functions.
     
    -    //
    -    // Create optimizer object, choose AGS algorithm and tune its settings:
    -    // * radius=0.1     good initial value; will be automatically decreased later.
    -    // * rho=0.0        penalty coefficient for nonlinear constraints; can be zero
    -    //                  because we do not have such constraints
    -    // * epsx=0.000001  stopping conditions
    -    // * s=[1,1]        all variables have unit scale
    -    //
    -    minnscreatef(2, x0, diffstep, state);
    -    minnssetalgoags(state, radius, rho);
    -    minnssetcond(state, epsx, maxits);
    -    minnssetscale(state, s);
    +NOTE: false negatives (nonsmooth problems are not identified as  nonsmooth
    +      ones) are possible although unlikely.
     
    -    //
    -    // Optimize and test results.
    -    //
    -    // Optimizer object accepts vector function, with first component
    -    // being target function, and next components being nonlinear equality
    -    // and inequality constraints (box/linear ones are passed separately
    -    // by means of minnssetbc() and minnssetlc() calls).
    -    //
    -    // If you do not have nonlinear constraints (exactly our situation), then
    -    // you will have one-component function vector.
    -    //
    -    // So, our vector function has form
    -    //
    -    //     {f0} = { 2*|x0|+|x1| }
    -    //
    -    alglib::minnsoptimize(state, nsfunc1_fvec);
    -    minnsresults(state, x1, rep);
    -    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [0.0000,0.0000]
    -    return 0;
    -}
    +      The reason  is  that  you  need  to  make several evaluations around
    +      nonsmoothness  in  order  to  accumulate  enough  information  about
    +      function curvature. Say, if you start right from the nonsmooth point,
    +      optimizer simply won't get enough data to understand what  is  going
    +      wrong before it terminates due to abrupt changes in the  derivative.
    +      It is also  possible  that  "unlucky"  step  will  move  us  to  the
    +      termination too quickly.
     
    +      Our current approach is to have less than 0.1%  false  negatives  in
    +      our test examples  (measured  with  multiple  restarts  from  random
    +      points), and to have exactly 0% false positives.
     
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +  -- ALGLIB --
    +     Copyright 21.11.2018 by Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::minlbfgsoptguardresults( + minlbfgsstate state, + optguardreport& rep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
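In practice the primary report boils down to a few boolean checks. A minimal sketch using only the flags listed above; the helper name check_optguard is illustrative.

#include <stdio.h>
#include "optimization.h"

// Summarizes the primary OptGuard report of a finished L-BFGS session
void check_optguard(alglib::minlbfgsstate &state)
{
    alglib::optguardreport rep;
    alglib::minlbfgsoptguardresults(state, rep);

    if( rep.badgradsuspected )
        printf("suspicious analytic gradient, component %d\n", (int)rep.badgradvidx);
    if( rep.nonc0suspected )
        printf("target may be discontinuous (non-C0)\n");
    if( rep.nonc1suspected )
        printf("target may be nonsmooth (non-C1), tests #0/#1: %d/%d\n",
               (int)rep.nonc1test0positive, (int)rep.nonc1test1positive);
    if( !rep.badgradsuspected && !rep.nonc0suspected && !rep.nonc1suspected )
        printf("no issues detected by OptGuard\n");
}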
    +
    /************************************************************************* +This function activates/deactivates nonsmoothness monitoring option of +the OptGuard integrity checker. Smoothness monitor silently observes +solution process and tries to detect ill-posed problems, i.e. ones with: +a) discontinuous target function (non-C0) +b) nonsmooth target function (non-C1) + +Smoothness monitoring does NOT interrupt optimization even if it suspects +that your problem is nonsmooth. It just sets corresponding flags in the +OptGuard report which can be retrieved after optimization is over. + +Smoothness monitoring is a moderate overhead option which often adds less +than 1% to the optimizer running time. Thus, you can use it even for large +scale problems. + +NOTE: OptGuard does NOT guarantee that it will always detect C0/C1 + continuity violations. + + First, minor errors are hard to catch - say, a 0.0001 difference in + the model values at two sides of the gap may be due to discontinuity + of the model - or simply because the model has changed. + + Second, C1-violations are especially difficult to detect in a + noninvasive way. The optimizer usually performs very short steps + near the nonsmoothness, and differentiation usually introduces a + lot of numerical noise. It is hard to tell whether some tiny + discontinuity in the slope is due to real nonsmoothness or just due + to numerical noise alone. + + Our top priority was to avoid false positives, so in some rare cases + minor errors may went unnoticed (however, in most cases they can be + spotted with restart from different initial point). + +INPUT PARAMETERS: + state - algorithm state + level - monitoring level: + * 0 - monitoring is disabled + * 1 - noninvasive low-overhead monitoring; function values + and/or gradients are recorded, but OptGuard does not + try to perform additional evaluations in order to + get more information about suspicious locations. + +=== EXPLANATION ========================================================== + +One major source of headache during optimization is the possibility of +the coding errors in the target function/constraints (or their gradients). +Such errors most often manifest themselves as discontinuity or +nonsmoothness of the target/constraints. + +Another frequent situation is when you try to optimize something involving +lots of min() and max() operations, i.e. nonsmooth target. Although not a +coding error, it is nonsmoothness anyway - and smooth optimizers usually +stop right after encountering nonsmoothness, well before reaching solution. + +OptGuard integrity checker helps you to catch such situations: it monitors +function values/gradients being passed to the optimizer and tries to +errors. Upon discovering suspicious pair of points it raises appropriate +flag (and allows you to continue optimization). When optimization is done, +you can study OptGuard result. + + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
void alglib::minlbfgsoptguardsmoothness(
+    minlbfgsstate state,
+    const xparams _params = alglib::xdefault);
+void alglib::minlbfgsoptguardsmoothness(
+    minlbfgsstate state,
+    ae_int_t level,
+    const xparams _params = alglib::xdefault);
+
+
    +

    Examples:   [1]  

    + +
    +
/*************************************************************************
+This family of functions is used to launch iterations of the nonlinear
+optimizer.
+
+These functions accept following parameters:
+    state   -   algorithm state
+    func    -   callback which calculates function (or merit function)
+                value func at given point x
+    grad    -   callback which calculates function (or merit function)
+                value func and gradient grad at given point x
+    rep     -   optional callback which is called after each iteration
+                can be NULL
+    ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
+                can be NULL
+
+NOTES:
+
+1. This function has two different implementations: one which uses exact
+   (analytical) user-supplied gradient, and one which uses function value
+   only and numerically differentiates function in order to obtain
+   gradient.
+
+   Depending on the specific function used to create optimizer object
+   (either MinLBFGSCreate() for analytical gradient or MinLBFGSCreateF()
+   for numerical differentiation) you should choose appropriate variant of
+   MinLBFGSOptimize() - one which accepts function AND gradient or one
+   which accepts function ONLY.
+
+   Be careful to choose variant of MinLBFGSOptimize() which corresponds to
+   your optimization scheme! Table below lists different combinations of
+   callback (function/gradient) passed to MinLBFGSOptimize() and specific
+   function used to create optimizer.
+
+                       |     USER PASSED TO MinLBFGSOptimize()
+   CREATED WITH        |  function only  |  function and gradient
+   ------------------------------------------------------------
+   MinLBFGSCreateF()   |      work       |          FAIL
+   MinLBFGSCreate()    |      FAIL       |          work
+
+   Here "FAIL" denotes inappropriate combinations of optimizer creation
+   function and MinLBFGSOptimize() version. Attempts to use such a
+   combination (for example, to create optimizer with MinLBFGSCreateF()
+   and to pass gradient information to MinLBFGSOptimize()) will lead to an
+   exception being thrown. Either you did not pass gradient when it WAS
+   needed or you passed gradient when it was NOT needed.
+
+  -- ALGLIB --
+     Copyright 20.03.2009 by Bochkanov Sergey
+*************************************************************************/
+
void minlbfgsoptimize(minlbfgsstate &state,
+    void (*func)(const real_1d_array &x, double &func, void *ptr),
+    void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
+    void *ptr = NULL,
+    const xparams _xparams = alglib::xdefault);
+void minlbfgsoptimize(minlbfgsstate &state,
+    void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
+    void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
+    void *ptr = NULL,
+    const xparams _xparams = alglib::xdefault);
+
    +

    Examples:   [1]  [2]  [3]  

    + +
    +
    /************************************************************************* +This subroutine submits request for termination of running optimizer. It +should be called from user-supplied callback when user decides that it is +time to "smoothly" terminate optimization process. As result, optimizer +stops at point which was "current accepted" when termination request was +submitted and returns error code 8 (successful termination). + +INPUT PARAMETERS: + State - optimizer structure + +NOTE: after request for termination optimizer may perform several + additional calls to user-supplied callbacks. It does NOT guarantee + to stop immediately - it just guarantees that these additional calls + will be discarded later. + +NOTE: calling this function on optimizer which is NOT running will have no + effect. + +NOTE: multiple calls to this function are possible. First call is counted, + subsequent calls are silently ignored. + + -- ALGLIB -- + Copyright 08.10.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgsrequesttermination( + minlbfgsstate state, + const xparams _params = alglib::xdefault); + +
    + +
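A minimal sketch of the intended usage, since no standalone example is linked for this call: the optimizer state is passed to the iteration-report callback through the ptr argument, and the callback requests termination once the target value is considered good enough. The callback name progress_rep, the 1.0e-6 threshold and the reuse of function1_grad from the neighbouring examples are illustrative assumptions.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "optimization.h"

using namespace alglib;

void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    // f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4, same target as in the examples below
    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    grad[0] = 400*pow(x[0]+3,3);
    grad[1] = 4*pow(x[1]-3,3);
}

void progress_rep(const real_1d_array &x, double func, void *ptr)
{
    // the optimizer state arrives through 'ptr'; once the target is small
    // enough we ask the optimizer to stop "smoothly" (completion code 8)
    minlbfgsstate *state = (minlbfgsstate*)ptr;
    if( func<1.0e-6 )
        minlbfgsrequesttermination(*state);
}

int main(int argc, char **argv)
{
    real_1d_array x = "[0,0]";
    minlbfgsstate state;
    minlbfgsreport rep;

    minlbfgscreate(1, x, state);
    minlbfgssetcond(state, 0, 0, 0, 0);   // default stopping criteria
    minlbfgssetxrep(state, true);         // required, otherwise rep() is never called
    alglib::minlbfgsoptimize(state, function1_grad, progress_rep, &state);
    minlbfgsresults(state, x, rep);
    printf("%d\n", int(rep.terminationtype)); // 8 is expected if the request was honored
    return 0;
}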
    +
    /************************************************************************* +This subroutine restarts LBFGS algorithm from new point. All optimization +parameters are left unchanged. + +This function allows to solve multiple optimization problems (which +must have same number of dimensions) without object reallocation penalty. + +INPUT PARAMETERS: + State - structure used to store algorithm state + X - new starting point. + + -- ALGLIB -- + Copyright 30.07.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgsrestartfrom( + minlbfgsstate state, + real_1d_array x, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
/*************************************************************************
+L-BFGS algorithm results
+
+INPUT PARAMETERS:
+    State   -   algorithm state
+
+OUTPUT PARAMETERS:
+    X       -   array[0..N-1], solution
+    Rep     -   optimization report:
+                * Rep.TerminationType completion code:
+                    * -8    internal integrity control detected infinite
+                            or NAN values in function/gradient. Abnormal
+                            termination signalled.
+                    * -2    rounding errors prevent further improvement.
+                            X contains best point found.
+                    * -1    incorrect parameters were specified
+                    *  1    relative function improvement is no more than
+                            EpsF.
+                    *  2    relative step is no more than EpsX.
+                    *  4    gradient norm is no more than EpsG
+                    *  5    MaxIts steps were taken
+                    *  7    stopping conditions are too stringent,
+                            further improvement is impossible
+                    *  8    terminated by user who called
+                            minlbfgsrequesttermination(). X contains point
+                            which was "current accepted" when termination
+                            request was submitted.
+                * Rep.IterationsCount contains iterations count
+                * NFEV contains number of function calculations
+
+  -- ALGLIB --
+     Copyright 02.04.2010 by Bochkanov Sergey
+*************************************************************************/
+
void alglib::minlbfgsresults(
+    minlbfgsstate state,
+    real_1d_array& x,
+    minlbfgsreport& rep,
+    const xparams _params = alglib::xdefault);
+
+
    +

    Examples:   [1]  [2]  [3]  

    + +
    +
    /************************************************************************* +L-BFGS algorithm results + +Buffered implementation of MinLBFGSResults which uses pre-allocated buffer +to store X[]. If buffer size is too small, it resizes buffer. It is +intended to be used in the inner cycles of performance critical algorithms +where array reallocation penalty is too large to be ignored. + + -- ALGLIB -- + Copyright 20.08.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgsresultsbuf( + minlbfgsstate state, + real_1d_array& x, + minlbfgsreport& rep, + const xparams _params = alglib::xdefault); + +
    + +
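Taken together with minlbfgsrestartfrom(), the buffered variant avoids reallocating X[] when the same optimizer is run many times. The fragment below is a sketch only: it assumes state has already been created with minlbfgscreate() for the 2-dimensional target function1_grad used in the examples below, and the list of starting points is made up.

// solve the same problem from several starting points, reusing both the
// optimizer object and the output buffer x
real_1d_array x;                       // resized by the first call, then reused
minlbfgsreport rep;
const char *starts[] = { "[0,0]", "[5,-5]", "[10,10]" };
for(int i=0; i<3; i++)
{
    real_1d_array x0 = starts[i];
    minlbfgsrestartfrom(state, x0);
    alglib::minlbfgsoptimize(state, function1_grad);
    minlbfgsresultsbuf(state, x, rep); // no reallocation after the first pass
}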
    +
/*************************************************************************
+This function sets stopping conditions for L-BFGS optimization algorithm.
+
+INPUT PARAMETERS:
+    State   -   structure which stores algorithm state
+    EpsG    -   >=0
+                The subroutine finishes its work if the condition
+                |v|<EpsG is satisfied, where:
+                * |.| means Euclidean norm
+                * v - scaled gradient vector, v[i]=g[i]*s[i]
+                * g - gradient
+                * s - scaling coefficients set by MinLBFGSSetScale()
+    EpsF    -   >=0
+                The subroutine finishes its work if on k+1-th iteration
+                the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1}
+                is satisfied.
+    EpsX    -   >=0
+                The subroutine finishes its work if on k+1-th iteration
+                the condition |v|<=EpsX is fulfilled, where:
+                * |.| means Euclidean norm
+                * v - scaled step vector, v[i]=dx[i]/s[i]
+                * dx - step vector, dx=X(k+1)-X(k)
+                * s - scaling coefficients set by MinLBFGSSetScale()
+    MaxIts  -   maximum number of iterations. If MaxIts=0, the number of
+                iterations is unlimited.
+
+Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to
+automatic stopping criterion selection (small EpsX).
+
+  -- ALGLIB --
+     Copyright 02.04.2010 by Bochkanov Sergey
+*************************************************************************/
+
void alglib::minlbfgssetcond(
+    minlbfgsstate state,
+    double epsg,
+    double epsf,
+    double epsx,
+    ae_int_t maxits,
+    const xparams _params = alglib::xdefault);
+
+
    +

    Examples:   [1]  [2]  [3]  

    + +
    +
    /************************************************************************* +Modification of the preconditioner: Cholesky factorization of approximate +Hessian is used. + +INPUT PARAMETERS: + State - structure which stores algorithm state + P - triangular preconditioner, Cholesky factorization of + the approximate Hessian. array[0..N-1,0..N-1], + (if larger, only leading N elements are used). + IsUpper - whether upper or lower triangle of P is given + (other triangle is not referenced) + +After call to this function preconditioner is changed to P (P is copied +into the internal buffer). + +NOTE: you can change preconditioner "on the fly", during algorithm +iterations. + +NOTE 2: P should be nonsingular. Exception will be thrown otherwise. + + -- ALGLIB -- + Copyright 13.10.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgssetpreccholesky( + minlbfgsstate state, + real_2d_array p, + bool isupper, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Modification of the preconditioner: default preconditioner (simple +scaling, same for all elements of X) is used. + +INPUT PARAMETERS: + State - structure which stores algorithm state + +NOTE: you can change preconditioner "on the fly", during algorithm +iterations. + + -- ALGLIB -- + Copyright 13.10.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgssetprecdefault( + minlbfgsstate state, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Modification of the preconditioner: diagonal of approximate Hessian is +used. + +INPUT PARAMETERS: + State - structure which stores algorithm state + D - diagonal of the approximate Hessian, array[0..N-1], + (if larger, only leading N elements are used). + +NOTE: you can change preconditioner "on the fly", during algorithm +iterations. + +NOTE 2: D[i] should be positive. Exception will be thrown otherwise. + +NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. + + -- ALGLIB -- + Copyright 13.10.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgssetprecdiag( + minlbfgsstate state, + real_1d_array d, + const xparams _params = alglib::xdefault); + +
    + +
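A short illustrative fragment: it assumes state was created with minlbfgscreate() for a 2-dimensional problem such as the one in the examples below, and the numbers are derived from that example's target rather than taken from the upstream manual. Note that the diagonal of the approximate Hessian is passed directly, not its inverse.

// diagonal of the approximate Hessian at the starting point; for
// f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4 at x=[0,0] this is roughly
// d2f/dx0^2 = 1200*(x0+3)^2 = 10800 and d2f/dx1^2 = 12*(x1-3)^2 = 108
real_1d_array d = "[10800, 108]";
minlbfgssetprecdiag(state, d);   // all entries must be strictly positive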
    +
/*************************************************************************
+Modification of the preconditioner: scale-based diagonal preconditioning.
+
+This preconditioning mode can be useful when you don't have approximate
+diagonal of Hessian, but you know that your variables are badly scaled
+(for example, one variable is in [1,10], and another in [1000,100000]),
+and most of the ill-conditioning comes from different scales of vars.
+
+In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2),
+can greatly improve convergence.
+
+IMPORTANT: you should set scale of your variables with MinLBFGSSetScale()
+call (before or after MinLBFGSSetPrecScale() call). Without knowledge of
+the scale of your variables scale-based preconditioner will be just unit
+matrix.
+
+INPUT PARAMETERS:
+    State   -   structure which stores algorithm state
+
+  -- ALGLIB --
+     Copyright 13.10.2010 by Bochkanov Sergey
+*************************************************************************/
+
void alglib::minlbfgssetprecscale(
+    minlbfgsstate state,
+    const xparams _params = alglib::xdefault);
+
+
    + +
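The note above ties scale-based preconditioning to MinLBFGSSetScale(); a brief fragment makes the pairing explicit. It assumes state was created for a badly scaled 2-variable problem, and the scale values are illustrative only.

// variable 0 lives around 1..10, variable 1 around 1000..100000
real_1d_array s = "[1, 10000]";
minlbfgssetscale(state, s);       // scales may be set before or after minlbfgssetprecscale(),
minlbfgssetprecscale(state);      // but without them the preconditioner is just the unit matrix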
    +
    /************************************************************************* +This function sets scaling coefficients for LBFGS optimizer. + +ALGLIB optimizers use scaling matrices to test stopping conditions (step +size and gradient are scaled before comparison with tolerances). Scale of +the I-th variable is a translation invariant measure of: +a) "how large" the variable is +b) how large the step should be to make significant changes in the function + +Scaling is also used by finite difference variant of the optimizer - step +along I-th axis is equal to DiffStep*S[I]. + +In most optimizers (and in the LBFGS too) scaling is NOT a form of +preconditioning. It just affects stopping conditions. You should set +preconditioner by separate call to one of the MinLBFGSSetPrec...() +functions. + +There is special preconditioning mode, however, which uses scaling +coefficients to form diagonal preconditioning matrix. You can turn this +mode on, if you want. But you should understand that scaling is not the +same thing as preconditioning - these are two different, although related +forms of tuning solver. + +INPUT PARAMETERS: + State - structure stores algorithm state + S - array[N], non-zero scaling coefficients + S[i] may be negative, sign doesn't matter. + + -- ALGLIB -- + Copyright 14.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgssetscale( + minlbfgsstate state, + real_1d_array s, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function sets maximum step length + +INPUT PARAMETERS: + State - structure which stores algorithm state + StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if + you don't want to limit step length. + +Use this subroutine when you optimize target function which contains exp() +or other fast growing functions, and optimization algorithm makes too +large steps which leads to overflow. This function allows us to reject +steps that are too large (and therefore expose us to the possible +overflow) without actually calculating function value at the x+stp*d. + + -- ALGLIB -- + Copyright 02.04.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgssetstpmax( + minlbfgsstate state, + double stpmax, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function turns on/off reporting. + +INPUT PARAMETERS: + State - structure which stores algorithm state + NeedXRep- whether iteration reports are needed or not + +If NeedXRep is True, algorithm will call rep() callback function if it is +provided to MinLBFGSOptimize(). + + + -- ALGLIB -- + Copyright 02.04.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlbfgssetxrep( + minlbfgsstate state, + bool needxrep, + const xparams _params = alglib::xdefault); + +
    + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
     
     using namespace alglib;
    -void  nsfunc2_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
     {
         //
    -    // this callback calculates function vector
    -    //
    -    //     f0(x0,x1) = 2*|x0|+x1
    -    //     f1(x0,x1) = x0-1
    -    //     f2(x0,x1) = -x1-1
    -    //
    -    // and Jacobian matrix J
    -    //
    -    //         [ df0/dx0   df0/dx1 ]
    -    //     J = [ df1/dx0   df1/dx1 ]
    -    //         [ df2/dx0   df2/dx1 ]
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
+    // and its derivatives df/dx0 and df/dx1
         //
    -    fi[0] = 2*fabs(double(x[0]))+fabs(double(x[1]));
    -    jac[0][0] = 2*alglib::sign(x[0]);
    -    jac[0][1] = alglib::sign(x[1]);
    -    fi[1] = x[0]-1;
    -    jac[1][0] = 1;
    -    jac[1][1] = 0;
    -    fi[2] = -x[1]-1;
    -    jac[2][0] = 0;
    -    jac[2][1] = -1;
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    +    grad[0] = 400*pow(x[0]+3,3);
    +    grad[1] = 4*pow(x[1]-3,3);
     }
     
     int main(int argc, char **argv)
    @@ -28558,123 +30244,75 @@
         //
         // This example demonstrates minimization of
         //
    -    //     f(x0,x1) = 2*|x0|+|x1|
    -    //
    -    // subject to combination of equality and inequality constraints
    +    //     f(x,y) = 100*(x+3)^4+(y-3)^4
         //
    -    //      x0  =  1
    -    //      x1 >= -1
    +    // using LBFGS method, with:
    +    // * initial point x=[0,0]
    +    // * unit scale being set for all variables (see minlbfgssetscale for more info)
    +    // * stopping criteria set to "terminate after short enough step"
    +    // * OptGuard integrity check being used to check problem statement
    +    //   for some common errors like nonsmoothness or bad analytic gradient
         //
    -    // using nonsmooth nonlinear optimizer. Although these constraints
    -    // are linear, we treat them as general nonlinear ones in order to
    -    // demonstrate nonlinearly constrained optimization setup.
    +    // First, we create optimizer object and tune its properties
         //
    -    real_1d_array x0 = "[1,1]";
    +    real_1d_array x = "[0,0]";
         real_1d_array s = "[1,1]";
    -    double epsx = 0.00001;
    -    double radius = 0.1;
    -    double rho = 50.0;
    +    double epsg = 0;
    +    double epsf = 0;
    +    double epsx = 0.0000000001;
         ae_int_t maxits = 0;
    -    minnsstate state;
    -    minnsreport rep;
    -    real_1d_array x1;
    +    minlbfgsstate state;
    +    minlbfgscreate(1, x, state);
    +    minlbfgssetcond(state, epsg, epsf, epsx, maxits);
    +    minlbfgssetscale(state, s);
     
         //
    -    // Create optimizer object, choose AGS algorithm and tune its settings:
    -    // * radius=0.1     good initial value; will be automatically decreased later.
    -    // * rho=50.0       penalty coefficient for nonlinear constraints. It is your
    -    //                  responsibility to choose good one - large enough that it
    -    //                  enforces constraints, but small enough in order to avoid
    -    //                  extreme slowdown due to ill-conditioning.
    -    // * epsx=0.000001  stopping conditions
    -    // * s=[1,1]        all variables have unit scale
    +    // Activate OptGuard integrity checking.
         //
    -    minnscreate(2, x0, state);
    -    minnssetalgoags(state, radius, rho);
    -    minnssetcond(state, epsx, maxits);
    -    minnssetscale(state, s);
    -
    -    //
    -    // Set general nonlinear constraints.
    +    // OptGuard monitor helps to catch common coding and problem statement
    +    // issues, like:
    +    // * discontinuity of the target function (C0 continuity violation)
    +    // * nonsmoothness of the target function (C1 continuity violation)
    +    // * erroneous analytic gradient, i.e. one inconsistent with actual
    +    //   change in the target/constraints
         //
    -    // This part is more tricky than working with box/linear constraints - you
    -    // can not "pack" general nonlinear function into double precision array.
    -    // That's why minnssetnlc() does not accept constraints itself - only
    -    // constraint COUNTS are passed: first parameter is number of equality
    -    // constraints, second one is number of inequality constraints.
    +    // OptGuard is essential for early prototyping stages because such
    +    // problems often result in premature termination of the optimizer
    +    // which is really hard to distinguish from the correct termination.
         //
    -    // As for constraining functions - these functions are passed as part
    -    // of problem Jacobian (see below).
    +    // IMPORTANT: GRADIENT VERIFICATION IS PERFORMED BY MEANS OF NUMERICAL
    +    //            DIFFERENTIATION. DO NOT USE IT IN PRODUCTION CODE!!!!!!!
         //
    -    // NOTE: MinNS optimizer supports arbitrary combination of boundary, general
    -    //       linear and general nonlinear constraints. This example does not
    -    //       show how to work with general linear constraints, but you can
    -    //       easily find it in documentation on minnlcsetlc() function.
    +    //            Other OptGuard checks add moderate overhead, but anyway
    +    //            it is better to turn them off when they are not needed.
         //
    -    minnssetnlc(state, 1, 1);
    +    minlbfgsoptguardsmoothness(state);
    +    minlbfgsoptguardgradient(state, 0.001);
     
         //
    -    // Optimize and test results.
    -    //
    -    // Optimizer object accepts vector function and its Jacobian, with first
    -    // component (Jacobian row) being target function, and next components
    -    // (Jacobian rows) being nonlinear equality and inequality constraints
    -    // (box/linear ones are passed separately by means of minnssetbc() and
    -    // minnssetlc() calls).
    -    //
    -    // Nonlinear equality constraints have form Gi(x)=0, inequality ones
    -    // have form Hi(x)<=0, so we may have to "normalize" constraints prior
    -    // to passing them to optimizer (right side is zero, constraints are
    -    // sorted, multiplied by -1 when needed).
    +    // Optimize and examine results.
         //
    -    // So, our vector function has form
    +    minlbfgsreport rep;
    +    alglib::minlbfgsoptimize(state, function1_grad);
    +    minlbfgsresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    +
         //
    -    //     {f0,f1,f2} = { 2*|x0|+|x1|,  x0-1, -x1-1 }
    +    // Check that OptGuard did not report errors
         //
    -    // with Jacobian
    +    // NOTE: want to test OptGuard? Try breaking the gradient - say, add
    +    //       1.0 to some of its components.
         //
    -    //         [ 2*sign(x0)   sign(x1) ]
    -    //     J = [     1           0     ]
    -    //         [     0          -1     ]
    -    //
    -    // which means that we have optimization problem
    -    //
    -    //     min{f0} subject to f1=0, f2<=0
    -    //
    -    // which is essentially same as
    -    //
    -    //     min { 2*|x0|+|x1| } subject to x0=1, x1>=-1
    -    //
    -    // NOTE: AGS solver used by us can handle nonsmooth and nonconvex
    -    //       optimization problems. It has convergence guarantees, i.e. it will
    -    //       converge to stationary point of the function after running for some
    -    //       time.
    -    //
    -    //       However, it is important to remember that "stationary point" is not
    -    //       equal to "solution". If your problem is convex, everything is OK.
    -    //       But nonconvex optimization problems may have "flat spots" - large
    -    //       areas where gradient is exactly zero, but function value is far away
    -    //       from optimal. Such areas are stationary points too, and optimizer
    -    //       may be trapped here.
    -    //
    -    //       "Flat spots" are nonsmooth equivalent of the saddle points, but with
    -    //       orders of magnitude worse properties - they may be quite large and
    -    //       hard to avoid. All nonsmooth optimizers are prone to this kind of the
    -    //       problem, because it is impossible to automatically distinguish "flat
    -    //       spot" from true solution.
    -    //
    -    //       This note is here to warn you that you should be very careful when
    -    //       you solve nonsmooth optimization problems. Visual inspection of
    -    //       results is essential.
    -    //
    -    alglib::minnsoptimize(state, nsfunc2_jac);
    -    minnsresults(state, x1, rep);
    -    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [1.0000,0.0000]
    +    optguardreport ogrep;
    +    minlbfgsoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
         return 0;
     }
     
     
    -
    +
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -28683,1682 +30321,10954 @@
     #include "optimization.h"
     
     using namespace alglib;
    -void  nsfunc1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
     {
         //
    -    // this callback calculates
    -    //
    -    //     f0(x0,x1) = 2*|x0|+x1
    -    //
    -    // and Jacobian matrix J = [df0/dx0 df0/dx1]
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
+    // and its derivatives df/dx0 and df/dx1
         //
    -    fi[0] = 2*fabs(double(x[0]))+fabs(double(x[1]));
    -    jac[0][0] = 2*alglib::sign(x[0]);
    -    jac[0][1] = alglib::sign(x[1]);
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    +    grad[0] = 400*pow(x[0]+3,3);
    +    grad[1] = 4*pow(x[1]-3,3);
     }
     
     int main(int argc, char **argv)
     {
         //
    -    // This example demonstrates minimization of
    -    //
    -    //     f(x0,x1) = 2*|x0|+|x1|
    +    // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4
    +    // using LBFGS method.
         //
    -    // using nonsmooth nonlinear optimizer.
    +    // Several advanced techniques are demonstrated:
    +    // * upper limit on step size
    +    // * restart from new point
         //
    -    real_1d_array x0 = "[1,1]";
    +    real_1d_array x = "[0,0]";
         real_1d_array s = "[1,1]";
    -    double epsx = 0.00001;
    -    double radius = 0.1;
    -    double rho = 0.0;
    +    double epsg = 0;
    +    double epsf = 0;
    +    double epsx = 0.0000000001;
    +    double stpmax = 0.1;
         ae_int_t maxits = 0;
    -    minnsstate state;
    -    minnsreport rep;
    -    real_1d_array x1;
    +    minlbfgsstate state;
    +    minlbfgsreport rep;
     
    -    //
    -    // Create optimizer object, choose AGS algorithm and tune its settings:
    -    // * radius=0.1     good initial value; will be automatically decreased later.
    -    // * rho=0.0        penalty coefficient for nonlinear constraints; can be zero
    -    //                  because we do not have such constraints
    -    // * epsx=0.000001  stopping conditions
    -    // * s=[1,1]        all variables have unit scale
    -    //
    -    minnscreate(2, x0, state);
    -    minnssetalgoags(state, radius, rho);
    -    minnssetcond(state, epsx, maxits);
    -    minnssetscale(state, s);
    +    // create and tune optimizer
    +    minlbfgscreate(1, x, state);
    +    minlbfgssetcond(state, epsg, epsf, epsx, maxits);
    +    minlbfgssetstpmax(state, stpmax);
    +    minlbfgssetscale(state, s);
     
    +    // Set up OptGuard integrity checker which catches errors
    +    // like nonsmooth targets or errors in the analytic gradient.
         //
    -    // Optimize and test results.
    -    //
    -    // Optimizer object accepts vector function and its Jacobian, with first
    -    // component (Jacobian row) being target function, and next components
    -    // (Jacobian rows) being nonlinear equality and inequality constraints
    -    // (box/linear ones are passed separately by means of minnssetbc() and
    -    // minnssetlc() calls).
    -    //
    -    // If you do not have nonlinear constraints (exactly our situation), then
    -    // you will have one-component function vector and 1xN Jacobian matrix.
    -    //
    -    // So, our vector function has form
    -    //
    -    //     {f0} = { 2*|x0|+|x1| }
    -    //
    -    // with Jacobian
    -    //
    -    //         [                       ]
    -    //     J = [ 2*sign(x0)   sign(x1) ]
    -    //         [                       ]
    -    //
    -    // NOTE: nonsmooth optimizer requires considerably more function
    -    //       evaluations than smooth solver - about 2N times more. Using
    -    //       numerical differentiation introduces additional (multiplicative)
    -    //       2N speedup.
    -    //
    -    //       It means that if smooth optimizer WITH user-supplied gradient
    -    //       needs 100 function evaluations to solve 50-dimensional problem,
    -    //       then AGS solver with user-supplied gradient will need about 10.000
    -    //       function evaluations, and with numerical gradient about 1.000.000
    -    //       function evaluations will be performed.
    +    // OptGuard is essential at the early prototyping stages.
         //
    -    // NOTE: AGS solver used by us can handle nonsmooth and nonconvex
    -    //       optimization problems. It has convergence guarantees, i.e. it will
    -    //       converge to stationary point of the function after running for some
    -    //       time.
    +    // NOTE: gradient verification needs 3*N additional function
    +    //       evaluations; DO NOT USE IT IN THE PRODUCTION CODE
    +    //       because it leads to unnecessary slowdown of your app.
    +    minlbfgsoptguardsmoothness(state);
    +    minlbfgsoptguardgradient(state, 0.001);
    +
    +    // first run
    +    alglib::minlbfgsoptimize(state, function1_grad);
    +    minlbfgsresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    +
    +    // second run - algorithm is restarted
    +    x = "[10,10]";
    +    minlbfgsrestartfrom(state, x);
    +    alglib::minlbfgsoptimize(state, function1_grad);
    +    minlbfgsresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
    +
    +    // check OptGuard integrity report. Why do we need it at all?
    +    // Well, try breaking the gradient by adding 1.0 to some
    +    // of its components - OptGuard should report it as error.
    +    // And it may also catch unintended errors too :)
    +    optguardreport ogrep;
    +    minlbfgsoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void function1_func(const real_1d_array &x, double &func, void *ptr)
    +{
         //
    -    //       However, it is important to remember that "stationary point" is not
    -    //       equal to "solution". If your problem is convex, everything is OK.
    -    //       But nonconvex optimization problems may have "flat spots" - large
    -    //       areas where gradient is exactly zero, but function value is far away
    -    //       from optimal. Such areas are stationary points too, and optimizer
    -    //       may be trapped here.
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
         //
    -    //       "Flat spots" are nonsmooth equivalent of the saddle points, but with
    -    //       orders of magnitude worse properties - they may be quite large and
    -    //       hard to avoid. All nonsmooth optimizers are prone to this kind of the
    -    //       problem, because it is impossible to automatically distinguish "flat
    -    //       spot" from true solution.
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    +}
    +
    +int main(int argc, char **argv)
    +{
         //
    -    //       This note is here to warn you that you should be very careful when
    -    //       you solve nonsmooth optimization problems. Visual inspection of
    -    //       results is essential.
    +    // This example demonstrates minimization of f(x,y) = 100*(x+3)^4+(y-3)^4
    +    // using numerical differentiation to calculate gradient.
         //
    -    alglib::minnsoptimize(state, nsfunc1_jac);
    -    minnsresults(state, x1, rep);
    -    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [0.0000,0.0000]
    +    real_1d_array x = "[0,0]";
    +    double epsg = 0.0000000001;
    +    double epsf = 0;
    +    double epsx = 0;
    +    double diffstep = 1.0e-6;
    +    ae_int_t maxits = 0;
    +    minlbfgsstate state;
    +    minlbfgsreport rep;
    +
    +    minlbfgscreatef(1, x, diffstep, state);
    +    minlbfgssetcond(state, epsg, epsf, epsx, maxits);
    +    alglib::minlbfgsoptimize(state, function1_func);
    +    minlbfgsresults(state, x, rep);
    +
    +    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,3]
         return 0;
     }
     
     
    -
    +
    -minqpreport
    -minqpstate
    +minlmreport
    +minlmstate
    -minqpcreate
    -minqpoptimize
    -minqpresults
    -minqpresultsbuf
    -minqpsetalgobleic
    -minqpsetalgocholesky
    -minqpsetalgoquickqp
    -minqpsetbc
    -minqpsetlc
    -minqpsetlinearterm
    -minqpsetorigin
    -minqpsetquadraticterm
    -minqpsetquadratictermsparse
    -minqpsetscale
    -minqpsetstartingpoint
    +minlmcreatefgh
    +minlmcreatefgj
    +minlmcreatefj
    +minlmcreatev
    +minlmcreatevgj
    +minlmcreatevj
    +minlmoptguardgradient
    +minlmoptguardresults
    +minlmoptimize
    +minlmrequesttermination
    +minlmrestartfrom
    +minlmresults
    +minlmresultsbuf
    +minlmsetacctype
    +minlmsetbc
    +minlmsetcond
    +minlmsetlc
    +minlmsetscale
    +minlmsetstpmax
    +minlmsetxrep
    - - - - - + + + + +
    minqp_d_bc1 Bound constrained dense quadratic programming
    minqp_d_lc1 Linearly constrained dense quadratic programming
    minqp_d_nonconvex Nonconvex quadratic programming
    minqp_d_u1 Unconstrained dense quadratic programming
    minqp_d_u2 Unconstrained sparse quadratic programming
    minlm_d_fgh Nonlinear Hessian-based optimization for general functions
    minlm_d_restarts Efficient restarts of LM optimizer
    minlm_d_v Nonlinear least squares optimization using function vector only
    minlm_d_vb Bound constrained nonlinear least squares optimization
    minlm_d_vj Nonlinear least squares optimization using function vector and Jacobian
    - +
     
/*************************************************************************
-This structure stores optimization report:
-* InnerIterationsCount      number of inner iterations
-* OuterIterationsCount      number of outer iterations
-* NCholesky                 number of Cholesky decomposition
-* NMV                       number of matrix-vector products
-                            (only products calculated as part of iterative
-                            process are counted)
-* TerminationType           completion code (see below)
+Optimization report, filled by MinLMResults() function
+
+FIELDS:
+* TerminationType, completion code:
+  * -8    optimizer detected NAN/INF values either in the function itself,
+          or in its Jacobian
+  * -5    inappropriate solver was used:
+          * solver created with minlmcreatefgh() used on problem with
+            general linear constraints (set with minlmsetlc() call).
+  * -3    constraints are inconsistent
+  *  2    relative step is no more than EpsX.
+  *  5    MaxIts steps were taken
+  *  7    stopping conditions are too stringent,
+          further improvement is impossible
+  *  8    terminated by user who called MinLMRequestTermination().
+          X contains point which was "current accepted" when termination
+          request was submitted.
+* IterationsCount, contains iterations count
+* NFunc, number of function calculations
+* NJac, number of Jacobian matrix calculations
+* NGrad, number of gradient calculations
+* NHess, number of Hessian calculations
+* NCholesky, number of Cholesky decomposition calculations
+*************************************************************************/
+
class minlmreport
+{
+    ae_int_t iterationscount;
+    ae_int_t terminationtype;
+    ae_int_t nfunc;
+    ae_int_t njac;
+    ae_int_t ngrad;
+    ae_int_t nhess;
+    ae_int_t ncholesky;
+};
+
+
    + +
    +
    /************************************************************************* +Levenberg-Marquardt optimizer. + +This structure should be created using one of the MinLMCreate???() +functions. You should not access its fields directly; use ALGLIB functions +to work with it. +*************************************************************************/ +
    class minlmstate +{ +}; + +
    + +
    +
    /************************************************************************* + LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION + +DESCRIPTION: +This function is used to find minimum of general form (not "sum-of- +-squares") function + F = F(x[0], ..., x[n-1]) +using its gradient and Hessian. Levenberg-Marquardt modification with +L-BFGS pre-optimization and internal pre-conditioned L-BFGS optimization +after each Levenberg-Marquardt step is used. + + +REQUIREMENTS: +This algorithm will request following information during its operation: + +* function value F at given point X +* F and gradient G (simultaneously) at given point X +* F, G and Hessian H (simultaneously) at given point X + +There are several overloaded versions of MinLMOptimize() function which +correspond to different LM-like optimization algorithms provided by this +unit. You should choose version which accepts func(), grad() and hess() +function pointers. First pointer is used to calculate F at given point, +second one calculates F(x) and grad F(x), third one calculates F(x), +grad F(x), hess F(x). + +You can try to initialize MinLMState structure with FGH-function and then +use incorrect version of MinLMOptimize() (for example, version which does +not provide Hessian matrix), but it will lead to exception being thrown +after first attempt to calculate Hessian. + + +USAGE: +1. User initializes algorithm state with MinLMCreateFGH() call +2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and + other functions +3. User calls MinLMOptimize() function which takes algorithm state and + pointers (delegates, etc.) to callback functions. +4. User calls MinLMResults() to get solution +5. Optionally, user may call MinLMRestartFrom() to solve another problem + with same N but another starting point and/or another function. + MinLMRestartFrom() allows to reuse already initialized structure. + + +INPUT PARAMETERS: + N - dimension, N>1 + * if given, only leading N elements of X are used + * if not given, automatically determined from size of X + X - initial solution, array[0..N-1] + +OUTPUT PARAMETERS: + State - structure which stores algorithm state + +NOTES: +1. you may tune stopping conditions with MinLMSetCond() function +2. if target function contains exp() or other fast growing functions, and + optimization algorithm makes too large steps which leads to overflow, + use MinLMSetStpMax() function to bound algorithm's steps. + + -- ALGLIB -- + Copyright 30.03.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmcreatefgh( + real_1d_array x, + minlmstate& state, + const xparams _params = alglib::xdefault); +void alglib::minlmcreatefgh( + ae_int_t n, + real_1d_array x, + minlmstate& state, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
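The USAGE list above is easiest to follow with a compact end-to-end sketch of the FGH protocol. It reuses the f(x,y) = 100*(x+3)^4 + (y-3)^4 target from the L-BFGS examples; the callback names and the decision to rely on default stopping conditions are illustrative assumptions.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "optimization.h"

using namespace alglib;

void function1_func(const real_1d_array &x, double &func, void *ptr)
{
    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
}
void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    grad[0] = 400*pow(x[0]+3,3);
    grad[1] = 4*pow(x[1]-3,3);
}
void function1_hess(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr)
{
    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    grad[0] = 400*pow(x[0]+3,3);
    grad[1] = 4*pow(x[1]-3,3);
    hess[0][0] = 1200*pow(x[0]+3,2);
    hess[0][1] = 0;
    hess[1][0] = 0;
    hess[1][1] = 12*pow(x[1]-3,2);
}

int main(int argc, char **argv)
{
    real_1d_array x = "[0,0]";
    minlmstate state;
    minlmreport rep;

    minlmcreatefgh(x, state);            // FGH protocol: F, gradient and Hessian
    alglib::minlmoptimize(state, function1_func, function1_grad, function1_hess);
    minlmresults(state, x, rep);
    printf("%s\n", x.tostring(2).c_str()); // should be close to [-3,3]
    return 0;
}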
    +
    /************************************************************************* +This is obsolete function. + +Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ(). + + -- ALGLIB -- + Copyright 30.03.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmcreatefgj( + ae_int_t m, + real_1d_array x, + minlmstate& state, + const xparams _params = alglib::xdefault); +void alglib::minlmcreatefgj( + ae_int_t n, + ae_int_t m, + real_1d_array x, + minlmstate& state, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function is considered obsolete since ALGLIB 3.1.0 and is present for +backward compatibility only. We recommend to use MinLMCreateVJ, which +provides similar, but more consistent and feature-rich interface. + + -- ALGLIB -- + Copyright 30.03.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmcreatefj( + ae_int_t m, + real_1d_array x, + minlmstate& state, + const xparams _params = alglib::xdefault); +void alglib::minlmcreatefj( + ae_int_t n, + ae_int_t m, + real_1d_array x, + minlmstate& state, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* + IMPROVED LEVENBERG-MARQUARDT METHOD FOR + NON-LINEAR LEAST SQUARES OPTIMIZATION + +DESCRIPTION: +This function is used to find minimum of function which is represented as +sum of squares: + F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) +using value of function vector f[] only. Finite differences are used to +calculate Jacobian. + + +REQUIREMENTS: +This algorithm will request following information during its operation: +* function vector f[] at given point X + +There are several overloaded versions of MinLMOptimize() function which +correspond to different LM-like optimization algorithms provided by this +unit. You should choose version which accepts fvec() callback. + +You can try to initialize MinLMState structure with VJ function and then +use incorrect version of MinLMOptimize() (for example, version which +works with general form function and does not accept function vector), but +it will lead to exception being thrown after first attempt to calculate +Jacobian. + + +USAGE: +1. User initializes algorithm state with MinLMCreateV() call +2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and + other functions +3. User calls MinLMOptimize() function which takes algorithm state and + callback functions. +4. User calls MinLMResults() to get solution +5. Optionally, user may call MinLMRestartFrom() to solve another problem + with same N/M but another starting point and/or another function. + MinLMRestartFrom() allows to reuse already initialized structure. + + +INPUT PARAMETERS: + N - dimension, N>1 + * if given, only leading N elements of X are used + * if not given, automatically determined from size of X + M - number of functions f[i] + X - initial solution, array[0..N-1] + DiffStep- differentiation step, >0 + +OUTPUT PARAMETERS: + State - structure which stores algorithm state + +See also MinLMIteration, MinLMResults. + +NOTES: +1. you may tune stopping conditions with MinLMSetCond() function +2. if target function contains exp() or other fast growing functions, and + optimization algorithm makes too large steps which leads to overflow, + use MinLMSetStpMax() function to bound algorithm's steps. + + -- ALGLIB -- + Copyright 30.03.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmcreatev( + ae_int_t m, + real_1d_array x, + double diffstep, + minlmstate& state, + const xparams _params = alglib::xdefault); +void alglib::minlmcreatev( + ae_int_t n, + ae_int_t m, + real_1d_array x, + double diffstep, + minlmstate& state, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  

    + +
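A minimal sketch of the V protocol described above: only the function vector f[] is supplied, and the Jacobian is obtained by finite differences. The residuals f0 = 10*(x0+3) and f1 = x1-3 (so that F = f0^2 + f1^2 has its minimum at [-3,3]) and the differentiation step are illustrative assumptions.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "optimization.h"

using namespace alglib;

void function1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
{
    // function vector; F(x) = fi[0]^2 + fi[1]^2 is minimized
    fi[0] = 10*(x[0]+3);
    fi[1] = x[1]-3;
}

int main(int argc, char **argv)
{
    real_1d_array x = "[0,0]";
    double diffstep = 0.0001;              // finite-difference step used to build the Jacobian
    minlmstate state;
    minlmreport rep;

    minlmcreatev(2, x, diffstep, state);   // m=2 functions, Jacobian via numerical differentiation
    alglib::minlmoptimize(state, function1_fvec);
    minlmresults(state, x, rep);
    printf("%s\n", x.tostring(2).c_str()); // should be close to [-3,3]
    return 0;
}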
    +
    /************************************************************************* +This is obsolete function. + +Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ(). + + -- ALGLIB -- + Copyright 30.03.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmcreatevgj( + ae_int_t m, + real_1d_array x, + minlmstate& state, + const xparams _params = alglib::xdefault); +void alglib::minlmcreatevgj( + ae_int_t n, + ae_int_t m, + real_1d_array x, + minlmstate& state, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* + IMPROVED LEVENBERG-MARQUARDT METHOD FOR + NON-LINEAR LEAST SQUARES OPTIMIZATION + +DESCRIPTION: +This function is used to find minimum of function which is represented as +sum of squares: + F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) +using value of function vector f[] and Jacobian of f[]. + + +REQUIREMENTS: +This algorithm will request following information during its operation: + +* function vector f[] at given point X +* function vector f[] and Jacobian of f[] (simultaneously) at given point + +There are several overloaded versions of MinLMOptimize() function which +correspond to different LM-like optimization algorithms provided by this +unit. You should choose version which accepts fvec() and jac() callbacks. +First one is used to calculate f[] at given point, second one calculates +f[] and Jacobian df[i]/dx[j]. + +You can try to initialize MinLMState structure with VJ function and then +use incorrect version of MinLMOptimize() (for example, version which +works with general form function and does not provide Jacobian), but it +will lead to exception being thrown after first attempt to calculate +Jacobian. + + +USAGE: +1. User initializes algorithm state with MinLMCreateVJ() call +2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and + other functions +3. User calls MinLMOptimize() function which takes algorithm state and + callback functions. +4. User calls MinLMResults() to get solution +5. Optionally, user may call MinLMRestartFrom() to solve another problem + with same N/M but another starting point and/or another function. + MinLMRestartFrom() allows to reuse already initialized structure. + + +INPUT PARAMETERS: + N - dimension, N>1 + * if given, only leading N elements of X are used + * if not given, automatically determined from size of X + M - number of functions f[i] + X - initial solution, array[0..N-1] + +OUTPUT PARAMETERS: + State - structure which stores algorithm state + +NOTES: +1. you may tune stopping conditions with MinLMSetCond() function +2. if target function contains exp() or other fast growing functions, and + optimization algorithm makes too large steps which leads to overflow, + use MinLMSetStpMax() function to bound algorithm's steps. + + -- ALGLIB -- + Copyright 30.03.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmcreatevj( + ae_int_t m, + real_1d_array x, + minlmstate& state, + const xparams _params = alglib::xdefault); +void alglib::minlmcreatevj( + ae_int_t n, + ae_int_t m, + real_1d_array x, + minlmstate& state, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
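The VJ protocol adds an analytic Jacobian; combining it with the OptGuard Jacobian verification documented next gives a convenient sketch. The residuals are the same illustrative ones as in the previous sketch, and the 0.001 test step is an assumption.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "optimization.h"

using namespace alglib;

void function1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
{
    fi[0] = 10*(x[0]+3);
    fi[1] = x[1]-3;
}
void function1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
{
    fi[0] = 10*(x[0]+3);
    fi[1] = x[1]-3;
    jac[0][0] = 10;                        // df0/dx0
    jac[0][1] = 0;                         // df0/dx1
    jac[1][0] = 0;                         // df1/dx0
    jac[1][1] = 1;                         // df1/dx1
}

int main(int argc, char **argv)
{
    real_1d_array x = "[0,0]";
    minlmstate state;
    minlmreport rep;
    optguardreport ogrep;

    minlmcreatevj(2, x, state);            // m=2 residuals, analytic Jacobian supplied
    minlmoptguardgradient(state, 0.001);   // verify the Jacobian numerically (debugging only)
    alglib::minlmoptimize(state, function1_fvec, function1_jac);
    minlmresults(state, x, rep);
    minlmoptguardresults(state, ogrep);
    printf("%s\n", x.tostring(2).c_str());                     // should be close to [-3,3]
    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // "false" if the Jacobian is consistent
    return 0;
}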
    +
    /************************************************************************* +This function activates/deactivates verification of the user-supplied +analytic Jacobian. + +Upon activation of this option OptGuard integrity checker performs +numerical differentiation of your target function vector at the initial +point (note: future versions may also perform check at the final point) +and compares numerical Jacobian with analytic one provided by you. + +If difference is too large, an error flag is set and optimization session +continues. After optimization session is over, you can retrieve the report +which stores both Jacobians, and specific components highlighted as +suspicious by the OptGuard. + +The OptGuard report can be retrieved with minlmoptguardresults(). + +IMPORTANT: gradient check is a high-overhead option which will cost you + about 3*N additional function evaluations. In many cases it may + cost as much as the rest of the optimization session. + + YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO + CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. + +NOTE: unlike previous incarnation of the gradient checking code, OptGuard + does NOT interrupt optimization even if it discovers bad gradient. + +INPUT PARAMETERS: + State - structure used to store algorithm state + TestStep - verification step used for numerical differentiation: + * TestStep=0 turns verification off + * TestStep>0 activates verification + You should carefully choose TestStep. Value which is + too large (so large that function behavior is non- + cubic at this scale) will lead to false alarms. Too + short step will result in rounding errors dominating + numerical derivative. + + You may use different step for different parameters by + means of setting scale with minlmsetscale(). + +=== EXPLANATION ========================================================== + +In order to verify gradient algorithm performs following steps: + * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], + where X[i] is i-th component of the initial point and S[i] is a scale + of i-th parameter + * F(X) is evaluated at these trial points + * we perform one more evaluation in the middle point of the interval + * we build cubic model using function values and derivatives at trial + points and we compare its prediction with actual value in the middle + point + + -- ALGLIB -- + Copyright 15.06.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmoptguardgradient( + minlmstate state, + double teststep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +Results of OptGuard integrity check, should be called after optimization +session is over. + +OptGuard checks analytic Jacobian against reference value obtained by +numerical differentiation with user-specified step. + +NOTE: other optimizers perform additional OptGuard checks for things like + C0/C1-continuity violations. However, LM optimizer can check only + for incorrect Jacobian. + + The reason is that unlike line search methods LM optimizer does not + perform extensive evaluations along the line. Thus, we simply do not + have enough data to catch C0/C1-violations. + +This check is activated with minlmoptguardgradient() function. + +Following flags are set when these errors are suspected: +* rep.badgradsuspected, and additionally: + * rep.badgradfidx for specific function (Jacobian row) suspected + * rep.badgradvidx for specific variable (Jacobian column) suspected + * rep.badgradxbase, a point where gradient/Jacobian is tested + * rep.badgraduser, user-provided gradient/Jacobian + * rep.badgradnum, reference gradient/Jacobian obtained via numerical + differentiation + +INPUT PARAMETERS: + state - algorithm state + +OUTPUT PARAMETERS: + rep - OptGuard report + + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmoptguardresults( + minlmstate state, + optguardreport& rep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
/*************************************************************************
+This family of functions is used to launch iterations of the nonlinear
+optimizer.
+
+These functions accept following parameters:
+    state   -   algorithm state
+    func    -   callback which calculates function (or merit function)
+                value func at given point x
+    grad    -   callback which calculates function (or merit function)
+                value func and gradient grad at given point x
+    hess    -   callback which calculates function (or merit function)
+                value func, gradient grad and Hessian hess at given point x
+    fvec    -   callback which calculates function vector fi[]
+                at given point x
+    jac     -   callback which calculates function vector fi[]
+                and Jacobian jac at given point x
+    rep     -   optional callback which is called after each iteration
+                can be NULL
+    ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
+                can be NULL
+
+NOTES:
+
+1. Depending on function used to create state structure, this algorithm
+   may accept Jacobian and/or Hessian and/or gradient. Accordingly, there
+   are several versions of this function, which accept different sets of
+   callbacks.
+
+   This flexibility opens the way to subtle errors - you may create state
+   with MinLMCreateFGH() (optimization using Hessian), but call a version
+   which does not accept Hessian. So when the algorithm requests the
+   Hessian, there will be no callback to call. In this case an exception
+   will be thrown.
+
+   Be careful to avoid such errors because there is no way to find them at
+   compile time - you can see them at runtime only.
+
+  -- ALGLIB --
+     Copyright 10.03.2009 by Bochkanov Sergey
+*************************************************************************/
+
void minlmoptimize(minlmstate &state,
+    void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
+    void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
+    void *ptr = NULL,
+    const xparams _xparams = alglib::xdefault);
+void minlmoptimize(minlmstate &state,
+    void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr),
+    void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
+    void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
+    void *ptr = NULL,
+    const xparams _xparams = alglib::xdefault);
+void minlmoptimize(minlmstate &state,
+    void (*func)(const real_1d_array &x, double &func, void *ptr),
+    void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
+    void (*hess)(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr),
+    void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
+    void *ptr = NULL,
+    const xparams _xparams = alglib::xdefault);
+void minlmoptimize(minlmstate &state,
+    void (*func)(const real_1d_array &x, double &func, void *ptr),
+    void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
+    void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
+    void *ptr = NULL,
+    const xparams _xparams = alglib::xdefault);
+void minlmoptimize(minlmstate &state,
+    void (*func)(const real_1d_array &x, double &func, void *ptr),
+    void (*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr),
+    void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr),
+    void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL,
+    void *ptr = NULL,
+    const xparams _xparams = alglib::xdefault);
+
    +

    Examples:   [1]  [2]  [3]  [4]  [5]  

    + +
    +
    /************************************************************************* +This subroutine submits request for termination of running optimizer. It +should be called from user-supplied callback when user decides that it is +time to "smoothly" terminate optimization process. As result, optimizer +stops at point which was "current accepted" when termination request was +submitted and returns error code 8 (successful termination). + +INPUT PARAMETERS: + State - optimizer structure + +NOTE: after request for termination optimizer may perform several + additional calls to user-supplied callbacks. It does NOT guarantee + to stop immediately - it just guarantees that these additional calls + will be discarded later. + +NOTE: calling this function on optimizer which is NOT running will have no + effect. + +NOTE: multiple calls to this function are possible. First call is counted, + subsequent calls are silently ignored. + + -- ALGLIB -- + Copyright 08.10.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmrequesttermination( + minlmstate state, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This subroutine restarts LM algorithm from new point. All optimization +parameters are left unchanged. + +This function allows to solve multiple optimization problems (which +must have same number of dimensions) without object reallocation penalty. + +INPUT PARAMETERS: + State - structure used for reverse communication previously + allocated with MinLMCreateXXX call. + X - new starting point. + + -- ALGLIB -- + Copyright 30.07.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmrestartfrom( + minlmstate state, + real_1d_array x, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +Levenberg-Marquardt algorithm results + +NOTE: if you activated OptGuard integrity checking functionality and want + to get OptGuard report, it can be retrieved with the help of + minlmoptguardresults() function. + +INPUT PARAMETERS: + State - algorithm state + +OUTPUT PARAMETERS: + X - array[0..N-1], solution + Rep - optimization report; includes termination codes and + additional information. Termination codes are listed below, + see comments for this structure for more info. + Termination code is stored in rep.terminationtype field: + * -8 optimizer detected NAN/INF values either in the + function itself, or in its Jacobian + * -3 constraints are inconsistent + * 2 relative step is no more than EpsX. + * 5 MaxIts steps was taken + * 7 stopping conditions are too stringent, + further improvement is impossible + * 8 terminated by user who called minlmrequesttermination(). + X contains point which was "current accepted" when + termination request was submitted. + + -- ALGLIB -- + Copyright 10.03.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmresults( + minlmstate state, + real_1d_array& x, + minlmreport& rep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  [4]  [5]  

    + +
    +
    /************************************************************************* +Levenberg-Marquardt algorithm results + +Buffered implementation of MinLMResults(), which uses pre-allocated buffer +to store X[]. If buffer size is too small, it resizes buffer. It is +intended to be used in the inner cycles of performance critical algorithms +where array reallocation penalty is too large to be ignored. + + -- ALGLIB -- + Copyright 10.03.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmresultsbuf( + minlmstate state, + real_1d_array& x, + minlmreport& rep, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +This function is used to change acceleration settings + +You can choose between two acceleration strategies: +* AccType=0, no acceleration. +* AccType=1, secant updates are used to update quadratic model after each + iteration. After fixed number of iterations (or after model breakdown) + we recalculate quadratic model using analytic Jacobian or finite + differences. Number of secant-based iterations depends on optimization + settings: about 3 iterations - when we have analytic Jacobian, up to 2*N + iterations - when we use finite differences to calculate Jacobian. + +AccType=1 is recommended when Jacobian calculation cost is prohibitively +high (several Mx1 function vector calculations followed by several NxN +Cholesky factorizations are faster than calculation of one M*N Jacobian). +It should also be used when we have no Jacobian, because finite difference +approximation takes too much time to compute. + +Table below lists optimization protocols (XYZ protocol corresponds to +MinLMCreateXYZ) and acceleration types they support (and use by default). + +ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS: + +protocol 0 1 comment +V + + +VJ + + +FGH + + +DEFAULT VALUES: + +protocol 0 1 comment +V x without acceleration it is so slooooooooow +VJ x +FGH x + +NOTE: this function should be called before optimization. Attempt to call +it during algorithm iterations may result in unexpected behavior. + +NOTE: attempt to call this function with unsupported protocol/acceleration +combination will result in exception being thrown. + + -- ALGLIB -- + Copyright 14.10.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmsetacctype( + minlmstate state, + ae_int_t acctype, + const xparams _params = alglib::xdefault); + +
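+An illustrative fragment (not a complete program): enabling secant-based acceleration right after creation of a V-protocol optimizer, with x and state declared as in the examples below:
+
+minlmcreatev(2, x, 0.0001, state);
+minlmsetacctype(state, 1);   // AccType=1 (secant updates); supported protocols are listed in the table above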
    + +
    +
    /************************************************************************* +This function sets boundary constraints for LM optimizer + +Boundary constraints are inactive by default (after initial creation). +They are preserved until explicitly turned off with another SetBC() call. + +INPUT PARAMETERS: + State - structure stores algorithm state + BndL - lower bounds, array[N]. + If some (all) variables are unbounded, you may specify + very small number or -INF (latter is recommended because + it will allow solver to use better algorithm). + BndU - upper bounds, array[N]. + If some (all) variables are unbounded, you may specify + very large number or +INF (latter is recommended because + it will allow solver to use better algorithm). + +NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th +variable will be "frozen" at X[i]=BndL[i]=BndU[i]. + +NOTE 2: this solver has following useful properties: +* bound constraints are always satisfied exactly +* function is evaluated only INSIDE area specified by bound constraints + or at its boundary + + -- ALGLIB -- + Copyright 14.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmsetbc( + minlmstate state, + real_1d_array bndl, + real_1d_array bndu, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +This function sets stopping conditions for Levenberg-Marquardt optimization +algorithm. + +INPUT PARAMETERS: + State - structure which stores algorithm state + EpsX - >=0 + The subroutine finishes its work if on k+1-th iteration + the condition |v|<=EpsX is fulfilled, where: + * |.| means Euclidean norm + * v - scaled step vector, v[i]=dx[i]/s[i] + * dx - step vector, dx=X(k+1)-X(k) + * s - scaling coefficients set by MinLMSetScale() + Recommended values: 1E-9 ... 1E-12. + MaxIts - maximum number of iterations. If MaxIts=0, the number of + iterations is unlimited. Only Levenberg-Marquardt + iterations are counted (L-BFGS/CG iterations are NOT + counted because their cost is very low compared to that of + LM). + +Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic +stopping criterion selection (small EpsX). + +NOTE: it is not recommended to set large EpsX (say, 0.001). Because LM is + a second-order method, it performs very precise steps anyway. + + -- ALGLIB -- + Copyright 02.04.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmsetcond( + minlmstate state, + double epsx, + ae_int_t maxits, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  [4]  [5]  

    + +
    +
/************************************************************************* +This function sets general linear constraints for LM optimizer + +Linear constraints are inactive by default (after initial creation). They +are preserved until explicitly turned off with another minlmsetlc() call. + +INPUT PARAMETERS: + State - structure stores algorithm state + C - linear constraints, array[K,N+1]. + Each row of C represents one constraint, either equality + or inequality (see below): + * first N elements correspond to coefficients, + * last element corresponds to the right part. + All elements of C (including right part) must be finite. + CT - type of constraints, array[K]: + * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] + * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] + * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] + K - number of equality/inequality constraints, K>=0: + * if given, only leading K elements of C/CT are used + * if not given, automatically determined from sizes of C/CT + +IMPORTANT: if you have linear constraints, it is strongly recommended to + set scale of variables with minlmsetscale(). QP solver which is + used to calculate linearly constrained steps heavily relies on + good scaling of input problems. + +IMPORTANT: solvers created with minlmcreatefgh() do not support linear + constraints. + +NOTE: linear (non-bound) constraints are satisfied only approximately - + there always exists some violation due to numerical errors and + algorithmic limitations. + +NOTE: general linear constraints add significant overhead to solution + process. Although solver performs roughly the same number of iterations + (when compared with similar box-only constrained problem), each + iteration now involves solution of linearly constrained QP + subproblem, which requires ~3-5 times more Cholesky decompositions. + Thus, if you can reformulate your problem in such a way that it has + only box constraints, it may be beneficial to do so. + + -- ALGLIB -- + Copyright 14.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmsetlc( + minlmstate state, + real_2d_array c, + integer_1d_array ct, + const xparams _params = alglib::xdefault); +void alglib::minlmsetlc( + minlmstate state, + real_2d_array c, + integer_1d_array ct, + ae_int_t k, + const xparams _params = alglib::xdefault); + +
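+An illustrative fragment showing the constraint format described above (one row per constraint: N coefficients followed by the right part, with the constraint sign given in ct[]); the constraints x0+x1<=4 and x0-x1=0 are made up for the sketch, and x/s/state are declared as in the examples below:
+
+real_2d_array c = "[[1,1,4],[1,-1,0]]";
+integer_1d_array ct = "[-1,0]";   // -1 => "<=", 0 => "=", +1 => ">="
+minlmcreatev(2, x, 0.0001, state);
+minlmsetscale(state, s);          // strongly recommended when linear constraints are present
+minlmsetlc(state, c, ct);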
    + +
    +
/************************************************************************* +This function sets scaling coefficients for LM optimizer. + +ALGLIB optimizers use scaling matrices to test stopping conditions (step +size and gradient are scaled before comparison with tolerances). Scale of +the I-th variable is a translation invariant measure of: +a) "how large" the variable is +b) how large the step should be to make significant changes in the function + +Generally, scale is NOT considered to be a form of preconditioner. But LM +optimizer is unique in that it uses scaling matrix both in the stopping +condition tests and as Marquardt damping factor. + +Proper scaling is very important for the algorithm performance. It is less +important for the quality of results, but still has some influence (it is +easier to converge when variables are properly scaled, so premature +stopping is possible when very badly scaled variables are combined with +relaxed stopping conditions). + +INPUT PARAMETERS: + State - structure stores algorithm state + S - array[N], non-zero scaling coefficients + S[i] may be negative, sign doesn't matter. + + -- ALGLIB -- + Copyright 14.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmsetscale( + minlmstate state, + real_1d_array s, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function sets maximum step length + +INPUT PARAMETERS: + State - structure which stores algorithm state + StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't + want to limit step length. + +Use this subroutine when you optimize target function which contains exp() +or other fast growing functions, and optimization algorithm makes too +large steps which leads to overflow. This function allows us to reject +steps that are too large (and therefore expose us to the possible +overflow) without actually calculating function value at the x+stp*d. + +NOTE: non-zero StpMax leads to moderate performance degradation because +intermediate step of preconditioned L-BFGS optimization is incompatible +with limits on step size. + + -- ALGLIB -- + Copyright 02.04.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmsetstpmax( + minlmstate state, + double stpmax, + const xparams _params = alglib::xdefault); + +
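+A one-line illustrative fragment; the limit itself is arbitrary and problem-dependent:
+
+minlmsetstpmax(state, 10.0);   // never make steps longer than 10; 0.0 (the default) means "no limit"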
    + +
    +
    /************************************************************************* +This function turns on/off reporting. + +INPUT PARAMETERS: + State - structure which stores algorithm state + NeedXRep- whether iteration reports are needed or not + +If NeedXRep is True, algorithm will call rep() callback function if it is +provided to MinLMOptimize(). Both Levenberg-Marquardt and internal L-BFGS +iterations are reported. + + -- ALGLIB -- + Copyright 02.04.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlmsetxrep( + minlmstate state, + bool needxrep, + const xparams _params = alglib::xdefault); + +
    + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void function1_func(const real_1d_array &x, double &func, void *ptr)
    +{
    +    //
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
    +    //
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    +}
    +void function1_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr) 
    +{
    +    //
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
+    // and its derivatives df/dx0 and df/dx1
    +    //
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    +    grad[0] = 400*pow(x[0]+3,3);
    +    grad[1] = 4*pow(x[1]-3,3);
    +}
    +void function1_hess(const real_1d_array &x, double &func, real_1d_array &grad, real_2d_array &hess, void *ptr)
    +{
    +    //
    +    // this callback calculates f(x0,x1) = 100*(x0+3)^4 + (x1-3)^4
+    // its derivatives df/dx0 and df/dx1
    +    // and its Hessian.
    +    //
    +    func = 100*pow(x[0]+3,4) + pow(x[1]-3,4);
    +    grad[0] = 400*pow(x[0]+3,3);
    +    grad[1] = 4*pow(x[1]-3,3);
    +    hess[0][0] = 1200*pow(x[0]+3,2);
    +    hess[0][1] = 0;
    +    hess[1][0] = 0;
    +    hess[1][1] = 12*pow(x[1]-3,2);
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of F(x0,x1) = 100*(x0+3)^4+(x1-3)^4
    +    // using "FGH" mode of the Levenberg-Marquardt optimizer.
    +    //
+    // F is treated like a monolithic function without internal structure,
    +    // i.e. we do NOT represent it as a sum of squares.
    +    //
    +    // Optimization algorithm uses:
    +    // * function value F(x0,x1)
    +    // * gradient G={dF/dxi}
    +    // * Hessian H={d2F/(dxi*dxj)}
    +    //
    +    real_1d_array x = "[0,0]";
    +    double epsx = 0.0000000001;
    +    ae_int_t maxits = 0;
    +    minlmstate state;
    +    minlmreport rep;
    +
    +    minlmcreatefgh(x, state);
    +    minlmsetcond(state, epsx, maxits);
    +    alglib::minlmoptimize(state, function1_func, function1_grad, function1_hess);
    +    minlmresults(state, x, rep);
    +
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,+3]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  function1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    +{
    +    //
    +    // this callback calculates
+    // f0(x0,x1) = 10*(x0+3)^2,
+    // f1(x0,x1) = (x1-3)^2
    +    //
    +    fi[0] = 10*pow(x[0]+3,2);
    +    fi[1] = pow(x[1]-3,2);
    +}
    +void  function2_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    +{
    +    //
    +    // this callback calculates
    +    // f0(x0,x1) = x0^2+1
    +    // f1(x0,x1) = x1-1
    +    //
    +    fi[0] = x[0]*x[0]+1;
    +    fi[1] = x[1]-1;
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of F(x0,x1) = f0^2+f1^2, where 
    +    //
    +    //     f0(x0,x1) = 10*(x0+3)^2
    +    //     f1(x0,x1) = (x1-3)^2
    +    //
    +    // using several starting points and efficient restarts.
    +    //
    +    real_1d_array x;
    +    double epsx = 0.0000000001;
    +    ae_int_t maxits = 0;
    +    minlmstate state;
    +    minlmreport rep;
    +
    +    //
    +    // create optimizer using minlmcreatev()
    +    //
    +    x = "[10,10]";
    +    minlmcreatev(2, x, 0.0001, state);
    +    minlmsetcond(state, epsx, maxits);
    +    alglib::minlmoptimize(state, function1_fvec);
    +    minlmresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,+3]
    +
    +    //
    +    // restart optimizer using minlmrestartfrom()
    +    //
    +    // we can use different starting point, different function,
    +    // different stopping conditions, but problem size
    +    // must remain unchanged.
    +    //
    +    x = "[4,4]";
    +    minlmrestartfrom(state, x);
    +    alglib::minlmoptimize(state, function2_fvec);
    +    minlmresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [0,1]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  function1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    +{
    +    //
    +    // this callback calculates
+    // f0(x0,x1) = 10*(x0+3)^2,
+    // f1(x0,x1) = (x1-3)^2
    +    //
    +    fi[0] = 10*pow(x[0]+3,2);
    +    fi[1] = pow(x[1]-3,2);
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of F(x0,x1) = f0^2+f1^2, where 
    +    //
    +    //     f0(x0,x1) = 10*(x0+3)^2
    +    //     f1(x0,x1) = (x1-3)^2
    +    //
    +    // using "V" mode of the Levenberg-Marquardt optimizer.
    +    //
    +    // Optimization algorithm uses:
+    // * function vector f[] = {f0,f1}
    +    //
    +    // No other information (Jacobian, gradient, etc.) is needed.
    +    //
    +    real_1d_array x = "[0,0]";
    +    real_1d_array s = "[1,1]";
    +    double epsx = 0.0000000001;
    +    ae_int_t maxits = 0;
    +    minlmstate state;
    +    minlmreport rep;
    +
    +    //
    +    // Create optimizer, tell it to:
    +    // * use numerical differentiation with step equal to 0.0001
    +    // * use unit scale for all variables (s is a unit vector)
    +    // * stop after short enough step (less than epsx)
    +    //
    +    minlmcreatev(2, x, 0.0001, state);
    +    minlmsetcond(state, epsx, maxits);
    +    minlmsetscale(state, s);
    +
    +    //
    +    // Optimize
    +    //
    +    alglib::minlmoptimize(state, function1_fvec);
    +
    +    //
    +    // Test optimization results
    +    //
    +    // NOTE: because we use numerical differentiation, we do not
    +    //       verify Jacobian correctness - it is always "correct".
    +    //       However, if you switch to analytic gradient, consider
    +    //       checking it with OptGuard (see other examples).
    +    //
    +    minlmresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,+3]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  function1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    +{
    +    //
    +    // this callback calculates
+    // f0(x0,x1) = 10*(x0+3)^2,
+    // f1(x0,x1) = (x1-3)^2
    +    //
    +    fi[0] = 10*pow(x[0]+3,2);
    +    fi[1] = pow(x[1]-3,2);
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of F(x0,x1) = f0^2+f1^2, where 
    +    //
    +    //     f0(x0,x1) = 10*(x0+3)^2
    +    //     f1(x0,x1) = (x1-3)^2
    +    //
    +    // with boundary constraints
    +    //
    +    //     -1 <= x0 <= +1
    +    //     -1 <= x1 <= +1
    +    //
    +    // using "V" mode of the Levenberg-Marquardt optimizer.
    +    //
    +    // Optimization algorithm uses:
+    // * function vector f[] = {f0,f1}
    +    //
    +    // No other information (Jacobian, gradient, etc.) is needed.
    +    //
    +    real_1d_array x = "[0,0]";
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[-1,-1]";
    +    real_1d_array bndu = "[+1,+1]";
    +    double epsx = 0.0000000001;
    +    ae_int_t maxits = 0;
    +    minlmstate state;
    +
    +    //
    +    // Create optimizer, tell it to:
+    // * use numerical differentiation with step equal to 0.0001
    +    // * use unit scale for all variables (s is a unit vector)
    +    // * stop after short enough step (less than epsx)
    +    // * set box constraints
    +    //
    +    minlmcreatev(2, x, 0.0001, state);
    +    minlmsetbc(state, bndl, bndu);
    +    minlmsetcond(state, epsx, maxits);
    +    minlmsetscale(state, s);
    +
    +    //
    +    // Optimize
    +    //
    +    alglib::minlmoptimize(state, function1_fvec);
    +
    +    //
    +    // Test optimization results
    +    //
    +    // NOTE: because we use numerical differentiation, we do not
    +    //       verify Jacobian correctness - it is always "correct".
    +    //       However, if you switch to analytic gradient, consider
    +    //       checking it with OptGuard (see other examples).
    +    //
    +    minlmreport rep;
    +    minlmresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-1,+1]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  function1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    +{
    +    //
    +    // this callback calculates
+    // f0(x0,x1) = 10*(x0+3)^2,
+    // f1(x0,x1) = (x1-3)^2
    +    //
    +    fi[0] = 10*pow(x[0]+3,2);
    +    fi[1] = pow(x[1]-3,2);
    +}
    +void  function1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +{
    +    //
    +    // this callback calculates
+    // f0(x0,x1) = 10*(x0+3)^2,
+    // f1(x0,x1) = (x1-3)^2
    +    // and Jacobian matrix J = [dfi/dxj]
    +    //
    +    fi[0] = 10*pow(x[0]+3,2);
    +    fi[1] = pow(x[1]-3,2);
    +    jac[0][0] = 20*(x[0]+3);
    +    jac[0][1] = 0;
    +    jac[1][0] = 0;
    +    jac[1][1] = 2*(x[1]-3);
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of F(x0,x1) = f0^2+f1^2, where 
    +    //
    +    //     f0(x0,x1) = 10*(x0+3)^2
    +    //     f1(x0,x1) = (x1-3)^2
    +    //
    +    // using "VJ" mode of the Levenberg-Marquardt optimizer.
    +    //
    +    // Optimization algorithm uses:
+    // * function vector f[] = {f0,f1}
    +    // * Jacobian matrix J = {dfi/dxj}.
    +    //
    +    real_1d_array x = "[0,0]";
    +    real_1d_array s = "[1,1]";
    +    double epsx = 0.0000000001;
    +    ae_int_t maxits = 0;
    +    minlmstate state;
    +
    +    //
    +    // Create optimizer, tell it to:
    +    // * use analytic gradient provided by user
    +    // * use unit scale for all variables (s is a unit vector)
    +    // * stop after short enough step (less than epsx)
    +    //
    +    minlmcreatevj(2, x, state);
    +    minlmsetcond(state, epsx, maxits);
    +    minlmsetscale(state, s);
    +
    +    //
    +    // Activate OptGuard integrity checking.
    +    //
    +    // OptGuard monitor helps to detect erroneous analytic Jacobian,
    +    // i.e. one inconsistent with actual change in the target function.
    +    //
    +    // OptGuard is essential for early prototyping stages because such
    +    // problems often result in premature termination of the optimizer
    +    // which is really hard to distinguish from the correct termination.
    +    //
    +    // IMPORTANT: JACOBIAN VERIFICATION IS PERFORMED BY MEANS OF NUMERICAL
    +    //            DIFFERENTIATION, THUS DO NOT USE IT IN PRODUCTION CODE!
    +    //
    +    minlmoptguardgradient(state, 0.001);
    +
    +    //
    +    // Optimize
    +    //
    +    alglib::minlmoptimize(state, function1_fvec, function1_jac);
    +
    +    //
    +    // Test optimization results
    +    //
    +    minlmreport rep;
    +    minlmresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [-3,+3]
    +
    +    //
    +    // Check that OptGuard did not report errors
    +    //
    +    // NOTE: want to test OptGuard? Try breaking the Jacobian - say, add
    +    //       1.0 to some of its components.
    +    //
    +    // NOTE: unfortunately, specifics of LM optimization do not allow us
    +    //       to detect errors like nonsmoothness (like we do with other
    +    //       optimizers). So, only Jacobian correctness is verified.
    +    //
    +    optguardreport ogrep;
    +    minlmoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    return 0;
    +}
    +
    +
    +
    + + +
    +
    /************************************************************************* +This structure stores optimization report: +* f target function value +* y dual variables +* stats array[N+M], statuses of box (N) and linear (M) + constraints: + * stats[i]>0 => constraint at upper bound + (also used for free non-basic + variables set to zero) + * stats[i]<0 => constraint at lower bound + * stats[i]=0 => constraint is inactive, basic + variable +* primalerror primal feasibility error +* dualerror dual feasibility error +* iterationscount iteration count +* terminationtype completion code (see below) + +Completion codes: +* -4 LP problem is primal unbounded (dual infeasible) +* -3 LP problem is primal infeasible (dual unbounded) +* 1..4 successful completion +* 5 MaxIts steps was taken +* 7 stopping conditions are too stringent, + further improvement is impossible, + X contains best point found so far. +*************************************************************************/ +
    class minlpreport +{ + double f; + real_1d_array y; + integer_1d_array stats; + double primalerror; + double dualerror; + ae_int_t iterationscount; + ae_int_t terminationtype; +}; + +
    + +
    +
    /************************************************************************* +This object stores linear solver state. +You should use functions provided by MinLP subpackage to work with this +object +*************************************************************************/ +
    class minlpstate +{ +}; + +
    + +
    +
    /************************************************************************* +This function appends two-sided linear constraint AL <= A*x <= AU to the +list of currently present constraints. + +Constraint is passed in compressed format: as list of non-zero entries of +coefficient vector A. Such approach is more efficient than dense storage +for highly sparse constraint vectors. + +INPUT PARAMETERS: + State - structure previously allocated with minlpcreate() call. + IdxA - array[NNZ], indexes of non-zero elements of A: + * can be unsorted + * can include duplicate indexes (corresponding entries of + ValA[] will be summed) + ValA - array[NNZ], values of non-zero elements of A + NNZ - number of non-zero coefficients in A + AL, AU - lower and upper bounds; + * AL=AU => equality constraint A*x + * AL<AU => two-sided constraint AL<=A*x<=AU + * AL=-INF => one-sided constraint A*x<=AU + * AU=+INF => one-sided constraint AL<=A*x + * AL=-INF, AU=+INF => constraint is ignored + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpaddlc2( + minlpstate state, + integer_1d_array idxa, + real_1d_array vala, + ae_int_t nnz, + double al, + double au, + const xparams _params = alglib::xdefault); + +
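+A sketch of the row-by-row workflow built around this function. It solves the same problem as the dense example at the end of this section (minimize -0.1*x0-x1 with -1<=x0,x1<=+1, x0-x1>=-1, x0+x1<=+1), but appends the two general linear constraints one at a time in compressed form; variable names are illustrative:
+
+#include "stdafx.h"
+#include <stdio.h>
+#include <math.h>
+#include "optimization.h"
+
+using namespace alglib;
+
+int main(int argc, char **argv)
+{
+    real_1d_array c = "[-0.1,-1]";
+    real_1d_array s = "[1,1]";
+    real_1d_array x;
+    minlpstate state;
+    minlpreport rep;
+
+    minlpcreate(2, state);
+    minlpsetcost(state, c);
+    minlpsetbcall(state, -1.0, +1.0);   // same box constraint -1<=x[i]<=+1 for both variables
+    minlpsetscale(state, s);
+
+    // append general linear constraints one by one, in compressed form:
+    // indexes of non-zero coefficients, their values, NNZ, lower and upper bounds
+    integer_1d_array idx = "[0,1]";
+    real_1d_array v1 = "[1,-1]";
+    minlpaddlc2(state, idx, v1, 2, -1.0, alglib::fp_posinf);   // x0-x1 >= -1
+    real_1d_array v2 = "[1,1]";
+    minlpaddlc2(state, idx, v2, 2, alglib::fp_neginf, +1.0);   // x0+x1 <= +1
+
+    minlpoptimize(state);
+    minlpresults(state, x, rep);
+    printf("%s\n", x.tostring(3).c_str()); // expected to match the dense example: [0,1]
+    return 0;
+}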
    + +
    +
/************************************************************************* +This function appends two-sided linear constraint AL <= A*x <= AU to the +list of currently present constraints. + +This version accepts dense constraint vector as input, but sparsifies it +for internal storage and processing. Thus, time to add one constraint +is O(N) - we have to scan entire array of length N. Sparse version of this +function is an order of magnitude faster for constraints with just a few +nonzeros per row. + +INPUT PARAMETERS: + State - structure previously allocated with minlpcreate() call. + A - linear constraint coefficient, array[N], right side is NOT + included. + AL, AU - lower and upper bounds; + * AL=AU => equality constraint Ai*x + * AL<AU => two-sided constraint AL<=A*x<=AU + * AL=-INF => one-sided constraint Ai*x<=AU + * AU=+INF => one-sided constraint AL<=Ai*x + * AL=-INF, AU=+INF => constraint is ignored + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpaddlc2dense( + minlpstate state, + real_1d_array a, + double al, + double au, + const xparams _params = alglib::xdefault); + +
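+For comparison, the same two rows as in the compressed sketch above could be appended in dense form; an illustrative fragment:
+
+real_1d_array a1 = "[1,-1]";
+minlpaddlc2dense(state, a1, -1.0, alglib::fp_posinf);   // x0-x1 >= -1
+real_1d_array a2 = "[1,1]";
+minlpaddlc2dense(state, a2, alglib::fp_neginf, +1.0);   // x0+x1 <= +1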
    + +
    +
    /************************************************************************* + LINEAR PROGRAMMING + +The subroutine creates LP solver. After initial creation it contains +default optimization problem with zero cost vector and all variables being +fixed to zero values and no constraints. + +In order to actually solve something you should: +* set cost vector with minlpsetcost() +* set variable bounds with minlpsetbc() or minlpsetbcall() +* specify constraint matrix with one of the following functions: + [*] minlpsetlc() for dense one-sided constraints + [*] minlpsetlc2dense() for dense two-sided constraints + [*] minlpsetlc2() for sparse two-sided constraints + [*] minlpaddlc2dense() to add one dense row to constraint matrix + [*] minlpaddlc2() to add one row to constraint matrix (compressed format) +* call minlpoptimize() to run the solver and minlpresults() to get the + solution vector and additional information. + +Presently this optimizer supports only revised simplex method as +underlying solver. DSE pricing and bounds flipping ratio test (aka long +dual step) are supported. Large-scale sparse LU solver with Forest-Tomlin +is used internally as linear algebra driver. + +Future releases of ALGLIB may introduce other solvers. + +INPUT PARAMETERS: + N - problem size + +OUTPUT PARAMETERS: + State - optimizer in the default state + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpcreate( + ae_int_t n, + minlpstate& state, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This function solves LP problem. + +INPUT PARAMETERS: + State - algorithm state + +You should use minlpresults() function to access results after calls to +this function. + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey. +*************************************************************************/ +
    void alglib::minlpoptimize( + minlpstate state, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
/************************************************************************* +LP solver results + +INPUT PARAMETERS: + State - algorithm state + +OUTPUT PARAMETERS: + X - array[N], solution. Filled by zeros on failure. + Rep - optimization report. You should check Rep.TerminationType, + which contains completion code, and you may check other + fields which contain additional information about algorithm + functioning. + + Failure codes returned by algorithm are: + * -4 LP problem is primal unbounded (dual infeasible) + * -3 LP problem is primal infeasible (dual unbounded) + + Success codes: + * 1..4 successful completion + * 5 MaxIts steps was taken + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpresults( + minlpstate state, + real_1d_array& x, + minlpreport& rep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +LP results + +Buffered implementation of MinLPResults() which uses pre-allocated buffer +to store X[]. If buffer size is too small, it resizes buffer. It is +intended to be used in the inner cycles of performance critical algorithms +where array reallocation penalty is too large to be ignored. + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpresultsbuf( + minlpstate state, + real_1d_array& x, + minlpreport& rep, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +This function sets box constraints for LP solver (all variables at once, +different constraints for different variables). + +The default state of constraints is to have all variables fixed at zero. +You have to overwrite it by your own constraint vector. Constraint status +is preserved until constraints are explicitly overwritten with another +minlpsetbc() call, overwritten with minlpsetbcall(), or partially +overwritten with minlpsetbci() call. + +Following types of constraints are supported: + + DESCRIPTION CONSTRAINT HOW TO SPECIFY + fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] + lower bound BndL[i]<=x[i] BndU[i]=+INF + upper bound x[i]<=BndU[i] BndL[i]=-INF + range BndL[i]<=x[i]<=BndU[i] ... + free variable - BndL[I]=-INF, BndU[I]=+INF + +INPUT PARAMETERS: + State - structure stores algorithm state + BndL - lower bounds, array[N]. + BndU - upper bounds, array[N]. + +NOTE: infinite values can be specified by means of Double.PositiveInfinity + and Double.NegativeInfinity (in C#) and alglib::fp_posinf and + alglib::fp_neginf (in C++). + +NOTE: you may replace infinities by very small/very large values, but it + is not recommended because large numbers may introduce large numerical + errors in the algorithm. + +NOTE: if constraints for all variables are the same you may use minlpsetbcall() + which allows to specify constraints without using arrays. + +NOTE: BndL>BndU will result in LP problem being recognized as infeasible. + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpsetbc( + minlpstate state, + real_1d_array bndl, + real_1d_array bndu, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
/************************************************************************* +This function sets box constraints for LP solver (all variables at once, +same constraints for all variables) + +The default state of constraints is to have all variables fixed at zero. +You have to overwrite it by your own constraint vector. Constraint status +is preserved until constraints are explicitly overwritten with another +minlpsetbcall() or minlpsetbc() call, or partially overwritten with +minlpsetbci(). + +Following types of constraints are supported: + + DESCRIPTION CONSTRAINT HOW TO SPECIFY + fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] + lower bound BndL[i]<=x[i] BndU[i]=+INF + upper bound x[i]<=BndU[i] BndL[i]=-INF + range BndL[i]<=x[i]<=BndU[i] ... + free variable - BndL[I]=-INF, BndU[I]=+INF + +INPUT PARAMETERS: + State - structure stores algorithm state + BndL - lower bound, same for all variables + BndU - upper bound, same for all variables + +NOTE: infinite values can be specified by means of Double.PositiveInfinity + and Double.NegativeInfinity (in C#) and alglib::fp_posinf and + alglib::fp_neginf (in C++). + +NOTE: you may replace infinities by very small/very large values, but it + is not recommended because large numbers may introduce large numerical + errors in the algorithm. + +NOTE: minlpsetbc() can be used to specify different constraints for + different variables. + +NOTE: BndL>BndU will result in LP problem being recognized as infeasible. + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpsetbcall( + minlpstate state, + double bndl, + double bndu, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
/************************************************************************* +This function sets box constraints for I-th variable (other variables are +not modified). + +The default state of constraints is to have all variables fixed at zero. +You have to overwrite it by your own constraint vector. + +Following types of constraints are supported: + + DESCRIPTION CONSTRAINT HOW TO SPECIFY + fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] + lower bound BndL[i]<=x[i] BndU[i]=+INF + upper bound x[i]<=BndU[i] BndL[i]=-INF + range BndL[i]<=x[i]<=BndU[i] ... + free variable - BndL[I]=-INF, BndU[I]=+INF + +INPUT PARAMETERS: + State - structure stores algorithm state + I - variable index, in [0,N) + BndL - lower bound for I-th variable + BndU - upper bound for I-th variable + +NOTE: infinite values can be specified by means of Double.PositiveInfinity + and Double.NegativeInfinity (in C#) and alglib::fp_posinf and + alglib::fp_neginf (in C++). + +NOTE: you may replace infinities by very small/very large values, but it + is not recommended because large numbers may introduce large numerical + errors in the algorithm. + +NOTE: minlpsetbc() can be used to specify different constraints for + different variables. + +NOTE: BndL>BndU will result in LP problem being recognized as infeasible. + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpsetbci( + minlpstate state, + ae_int_t i, + double bndl, + double bndu, + const xparams _params = alglib::xdefault); + +
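+An illustrative fragment combining the all-at-once and per-variable box-constraint setters; the bounds are made up for the sketch:
+
+minlpsetbcall(state, -1.0, +1.0);                 // start with -1<=x[i]<=+1 for all variables
+minlpsetbci(state, 0, -1.0, alglib::fp_posinf);   // then drop the upper bound for variable 0 only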
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This function sets cost term for LP solver. + +By default, cost term is zero. + +INPUT PARAMETERS: + State - structure which stores algorithm state + C - cost term, array[N]. + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpsetcost( + minlpstate state, + real_1d_array c, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This function sets one-sided linear constraints A*x ~ AU, where "~" can be +a mix of "<=", "=" and ">=". + +IMPORTANT: this function is provided here for compatibility with the rest + of ALGLIB optimizers which accept constraints in format like + this one. Many real-life problems feature two-sided constraints + like a0 <= a*x <= a1. It is really inefficient to add them as a + pair of one-sided constraints. + + Use minlpsetlc2dense(), minlpsetlc2(), minlpaddlc2() (or its + sparse version) wherever possible. + +INPUT PARAMETERS: + State - structure previously allocated with minlpcreate() call. + A - linear constraints, array[K,N+1]. Each row of A represents + one constraint, with first N elements being linear coefficients, + and last element being right side. + CT - constraint types, array[K]: + * if CT[i]>0, then I-th constraint is A[i,*]*x >= A[i,n] + * if CT[i]=0, then I-th constraint is A[i,*]*x = A[i,n] + * if CT[i]<0, then I-th constraint is A[i,*]*x <= A[i,n] + K - number of equality/inequality constraints, K>=0; if not + given, inferred from sizes of A and CT. + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpsetlc( + minlpstate state, + real_2d_array a, + integer_1d_array ct, + const xparams _params = alglib::xdefault); +void alglib::minlpsetlc( + minlpstate state, + real_2d_array a, + integer_1d_array ct, + ae_int_t k, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This function sets two-sided linear constraints AL <= A*x <= AU with +sparse constraining matrix A. Recommended for large-scale problems. + +This function overwrites linear (non-box) constraints set by previous +calls (if such calls were made). + +INPUT PARAMETERS: + State - structure previously allocated with minlpcreate() call. + A - sparse matrix with size [K,N] (exactly!). + Each row of A represents one general linear constraint. + A can be stored in any sparse storage format. + AL, AU - lower and upper bounds, array[K]; + * AL[i]=AU[i] => equality constraint Ai*x + * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i] + * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] + * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x + * AL[i]=-INF, AU[i]=+INF => constraint is ignored + K - number of equality/inequality constraints, K>=0. If K=0 + is specified, A, AL, AU are ignored. + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpsetlc2( + minlpstate state, + sparsematrix a, + real_1d_array al, + real_1d_array au, + ae_int_t k, + const xparams _params = alglib::xdefault); + +
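+An illustrative fragment: the same two constraints as in the dense example at the end of this section, supplied as a sparse matrix. sparsematrix, sparsecreate() and sparseset() come from the linalg subpackage; per the comment above, any sparse storage format is accepted:
+
+sparsematrix a;
+sparsecreate(2, 2, 4, a);    // 2x2 constraint matrix, 4 non-zero elements expected
+sparseset(a, 0, 0, +1.0);    // row 0:  x0 - x1
+sparseset(a, 0, 1, -1.0);
+sparseset(a, 1, 0, +1.0);    // row 1:  x0 + x1
+sparseset(a, 1, 1, +1.0);
+real_1d_array al = "[-1,-inf]";
+real_1d_array au = "[+inf,+1]";
+minlpsetlc2(state, a, al, au, 2);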
    + +
    +
    /************************************************************************* +This function sets two-sided linear constraints AL <= A*x <= AU. + +This version accepts dense matrix as input; internally LP solver uses +sparse storage anyway (most LP problems are sparse), but for your +convenience it may accept dense inputs. This function overwrites linear +constraints set by previous calls (if such calls were made). + +We recommend you to use sparse version of this function unless you solve +small-scale LP problem (less than few hundreds of variables). + +NOTE: there also exist several versions of this function: + * one-sided dense version which accepts constraints in the same + format as one used by QP and NLP solvers + * two-sided sparse version which accepts sparse matrix + * two-sided dense version which allows you to add constraints row by row + * two-sided sparse version which allows you to add constraints row by row + +INPUT PARAMETERS: + State - structure previously allocated with minlpcreate() call. + A - linear constraints, array[K,N]. Each row of A represents + one constraint. One-sided inequality constraints, two- + sided inequality constraints, equality constraints are + supported (see below) + AL, AU - lower and upper bounds, array[K]; + * AL[i]=AU[i] => equality constraint Ai*x + * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i] + * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] + * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x + * AL[i]=-INF, AU[i]=+INF => constraint is ignored + K - number of equality/inequality constraints, K>=0; if not + given, inferred from sizes of A, AL, AU. + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpsetlc2dense( + minlpstate state, + real_2d_array a, + real_1d_array al, + real_1d_array au, + const xparams _params = alglib::xdefault); +void alglib::minlpsetlc2dense( + minlpstate state, + real_2d_array a, + real_1d_array al, + real_1d_array au, + ae_int_t k, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This function sets scaling coefficients. + +ALGLIB optimizers use scaling matrices to test stopping conditions and as +preconditioner. + +Scale of the I-th variable is a translation invariant measure of: +a) "how large" the variable is +b) how large the step should be to make significant changes in the + function + +INPUT PARAMETERS: + State - structure stores algorithm state + S - array[N], non-zero scaling coefficients + S[i] may be negative, sign doesn't matter. + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minlpsetscale( + minlpstate state, + real_1d_array s, + const xparams _params = alglib::xdefault); + +
    + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates how to minimize
    +    //
    +    //     F(x0,x1) = -0.1*x0 - x1
    +    //
    +    // subject to box constraints
    +    //
    +    //     -1 <= x0,x1 <= +1 
    +    //
    +    // and general linear constraints
    +    //
    +    //     x0 - x1 >= -1
    +    //     x0 + x1 <=  1
    +    //
    +    // We use dual simplex solver provided by ALGLIB for this task. Box
    +    // constraints are specified by means of constraint vectors bndl and
    +    // bndu (we have bndl<=x<=bndu). General linear constraints are
    +    // specified as AL<=A*x<=AU, with AL/AU being 2x1 vectors and A being
    +    // 2x2 matrix.
    +    //
    +    // NOTE: some/all components of AL/AU can be +-INF, same applies to
    +    //       bndl/bndu. You can also have AL[I]=AU[i] (as well as
    +    //       BndL[i]=BndU[i]).
    +    //
    +    real_2d_array a = "[[1,-1],[1,+1]]";
    +    real_1d_array al = "[-1,-inf]";
    +    real_1d_array au = "[+inf,+1]";
    +    real_1d_array c = "[-0.1,-1]";
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[-1,-1]";
    +    real_1d_array bndu = "[+1,+1]";
    +    real_1d_array x;
    +    minlpstate state;
    +    minlpreport rep;
    +
    +    minlpcreate(2, state);
    +
    +    //
    +    // Set cost vector, box constraints, general linear constraints.
    +    //
    +    // Box constraints can be set in one call to minlpsetbc() or minlpsetbcall()
    +    // (latter sets same constraints for all variables and accepts two scalars
    +    // instead of two vectors).
    +    //
    +    // General linear constraints can be specified in several ways:
    +    // * minlpsetlc2dense() - accepts dense 2D array as input; sometimes this
    +    //   approach is more convenient, although less memory-efficient.
    +    // * minlpsetlc2() - accepts sparse matrix as input
    +    // * minlpaddlc2dense() - appends one row to the current set of constraints;
    +    //   row being appended is specified as dense vector
    +    // * minlpaddlc2() - appends one row to the current set of constraints;
    +    //   row being appended is specified as sparse set of elements
    +    // Independently from specific function being used, LP solver uses sparse
    +    // storage format for internal representation of constraints.
    +    //
    +    minlpsetcost(state, c);
    +    minlpsetbc(state, bndl, bndu);
    +    minlpsetlc2dense(state, a, al, au, 2);
    +
    +    //
    +    // Set scale of the parameters.
    +    //
    +    // It is strongly recommended that you set scale of your variables.
    +    // Knowing their scales is essential for evaluation of stopping criteria
    +    // and for preconditioning of the algorithm steps.
    +    // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php
    +    //
    +    minlpsetscale(state, s);
    +
    +    // Solve
    +    minlpoptimize(state);
    +    minlpresults(state, x, rep);
    +    printf("%s\n", x.tostring(3).c_str()); // EXPECTED: [0,1]
    +    return 0;
    +}
    +
    +
    +
    + + +
    +
/************************************************************************* +These fields store optimization report: +* iterationscount total number of inner iterations +* nfev number of gradient evaluations +* terminationtype termination type (see below) + +Scaled constraint violations are reported: +* bcerr maximum violation of the box constraints +* bcidx index of the most violated box constraint (or + -1, if all box constraints are satisfied or + there is no box constraint) +* lcerr maximum violation of the linear constraints, + computed as maximum scaled distance between + final point and constraint boundary. +* lcidx index of the most violated linear constraint + (or -1, if all constraints are satisfied or + there are no general linear constraints) +* nlcerr maximum violation of the nonlinear constraints +* nlcidx index of the most violated nonlinear constraint + (or -1, if all constraints are satisfied or + there are no nonlinear constraints) + +Violations of box constraints are scaled on per-component basis according +to the scale vector s[] as specified by minnlcsetscale(). Violations of +the general linear constraints are also computed using user-supplied +variable scaling. Violations of nonlinear constraints are computed "as is" + +TERMINATION CODES + +TerminationType field contains completion code, which can be either: + +=== FAILURE CODE === + -8 internal integrity control detected infinite or NAN values in + function/gradient. Abnormal termination signaled. + -3 box constraints are infeasible. Note: infeasibility of non-box + constraints does NOT trigger emergency completion; you have to + examine bcerr/lcerr/nlcerr to detect possibly inconsistent + constraints. + +=== SUCCESS CODE === + 2 relative step is no more than EpsX. + 5 MaxIts steps was taken + 7 stopping conditions are too stringent, + further improvement is impossible, + X contains best point found so far. + 8 user requested algorithm termination via minnlcrequesttermination(), + last accepted point is returned + +Other fields of this structure are not documented and should not be used! +*************************************************************************/ +
    class minnlcreport +{ + ae_int_t iterationscount; + ae_int_t nfev; + ae_int_t terminationtype; + double bcerr; + ae_int_t bcidx; + double lcerr; + ae_int_t lcidx; + double nlcerr; + ae_int_t nlcidx; + ae_int_t dbgphase0its; +}; + +
    + +
    +
    /************************************************************************* +This object stores nonlinear optimizer state. +You should use functions provided by MinNLC subpackage to work with this +object +*************************************************************************/ +
    class minnlcstate +{ +}; + +
    + +
    +
/************************************************************************* + NONLINEARLY CONSTRAINED OPTIMIZATION + WITH PRECONDITIONED AUGMENTED LAGRANGIAN ALGORITHM + +DESCRIPTION: +The subroutine minimizes function F(x) of N arguments subject to any +combination of: +* bound constraints +* linear inequality constraints +* linear equality constraints +* nonlinear equality constraints Gi(x)=0 +* nonlinear inequality constraints Hi(x)<=0 + +REQUIREMENTS: +* user must provide function value and gradient for F(), H(), G() +* starting point X0 must be feasible or not too far away from the feasible + set +* F(), G(), H() are continuously differentiable on the feasible set and + its neighborhood +* nonlinear constraints G() and H() must have non-zero gradient at G(x)=0 + and at H(x)=0. Say, constraint like x^2>=1 is supported, but x^2>=0 is + NOT supported. + +USAGE: + +Constrained optimization is far more complex than the unconstrained one. +Nonlinearly constrained optimization is one of the most esoteric numerical +procedures. + +Here we give a very brief outline of the MinNLC optimizer. We strongly +recommend you to study examples in the ALGLIB Reference Manual and to read +ALGLIB User Guide on optimization, which is available at +http://www.alglib.net/optimization/ + +1. User initializes algorithm state with MinNLCCreate() call and chooses + what NLC solver to use. There is some solver which is used by default, + with default settings, but you should NOT rely on default choice. It + may change in future releases of ALGLIB without notice, and no one can + guarantee that new solver will be able to solve your problem with + default settings. + + On the other hand, if you choose solver explicitly, you can be pretty + sure that it will work with new ALGLIB releases. + + In the current release following solvers can be used: + * SQP solver, recommended for medium-scale problems (less than a thousand + variables) with hard-to-evaluate target functions. Requires fewer + function evaluations than other solvers but each step involves + solution of QP subproblem, so running time may be higher than that of + AUL (another recommended option). Activated with minnlcsetalgosqp() + function. + * AUL solver with dense preconditioner, recommended for large-scale + problems or for problems with cheap target function. Needs more + function evaluations than SQP (about 5x-10x more), but its + iterations are much cheaper than those of SQP. Activated with + minnlcsetalgoaul() function. + * SLP solver, successive linear programming. The slowest one, requires + more target function evaluations than SQP and AUL. However, it is + somewhat more robust in tricky cases, so it can be used as a backup + plan. Activated with minnlcsetalgoslp() function. + +2. [optional] user activates OptGuard integrity checker which tries to + detect possible errors in the user-supplied callbacks: + * discontinuity/nonsmoothness of the target/nonlinear constraints + * errors in the analytic gradient provided by user + This feature is essential for early prototyping stages because it helps + to catch common coding and problem statement errors. + OptGuard can be activated with the following functions (one per each check + performed): + * minnlcoptguardsmoothness() + * minnlcoptguardgradient() + +3.
User adds boundary and/or linear and/or nonlinear constraints by means + of calling one of the following functions: + a) minnlcsetbc() for boundary constraints + b) minnlcsetlc() for linear constraints + c) minnlcsetnlc() for nonlinear constraints + You may combine (a), (b) and (c) in one optimization problem. + +4. User sets scale of the variables with minnlcsetscale() function. It is + VERY important to set scale of the variables, because nonlinearly + constrained problems are hard to solve when variables are badly scaled. + +5. User sets stopping conditions with minnlcsetcond(). If NLC solver + uses inner/outer iteration layout, this function sets stopping + conditions for INNER iterations. + +6. Finally, user calls minnlcoptimize() function which takes algorithm + state and pointer (delegate, etc.) to callback function which calculates + F/G/H. + +7. User calls minnlcresults() to get solution; additionally you can + retrieve OptGuard report with minnlcoptguardresults(), and get detailed + report about purported errors in the target function with: + * minnlcoptguardnonc1test0results() + * minnlcoptguardnonc1test1results() + +8. Optionally user may call minnlcrestartfrom() to solve another problem + with same N but another starting point. minnlcrestartfrom() allows + reusing an already initialized structure. + + +INPUT PARAMETERS: + N - problem dimension, N>0: + * if given, only leading N elements of X are used + * if not given, automatically determined from size of X + X - starting point, array[N]: + * it is better to set X to a feasible point + * but X can be infeasible, in which case algorithm will try + to find feasible point first, using X as initial + approximation. + +OUTPUT PARAMETERS: + State - structure stores algorithm state + + -- ALGLIB -- + Copyright 06.06.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlccreate( + real_1d_array x, + minnlcstate& state, + const xparams _params = alglib::xdefault); +void alglib::minnlccreate( + ae_int_t n, + real_1d_array x, + minnlcstate& state, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  

    + +
    +
/************************************************************************* +This subroutine is a finite difference variant of MinNLCCreate(). It uses +finite differences in order to differentiate target function. + +Description below contains information which is specific to this function +only. We recommend to read comments on MinNLCCreate() in order to get more +information about creation of NLC optimizer. + +INPUT PARAMETERS: + N - problem dimension, N>0: + * if given, only leading N elements of X are used + * if not given, automatically determined from size of X + X - starting point, array[N]: + * it is better to set X to a feasible point + * but X can be infeasible, in which case algorithm will try + to find feasible point first, using X as initial + approximation. + DiffStep- differentiation step, >0 + +OUTPUT PARAMETERS: + State - structure stores algorithm state + +NOTES: +1. algorithm uses 4-point central formula for differentiation. +2. differentiation step along I-th axis is equal to DiffStep*S[I] where + S[] is scaling vector which can be set by MinNLCSetScale() call. +3. we recommend you to use moderate values of differentiation step. Too + large step will result in too large TRUNCATION errors, while too small + step will result in too large NUMERICAL errors. 1.0E-4 can be a good + value to start from. +4. Numerical differentiation is very inefficient - one gradient + calculation needs 4*N function evaluations. This function will work for + any N - either small (1...10), moderate (10...100) or large (100...). + However, performance penalty will be too severe for any N's except for + small ones. + We should also say that code which relies on numerical differentiation + is less robust and precise. Imprecise gradient may slow down + convergence, especially on highly nonlinear problems. + Thus we recommend to use this function for fast prototyping on small- + dimensional problems only, and to implement analytical gradient as soon + as possible. + + -- ALGLIB -- + Copyright 06.06.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlccreatef( + real_1d_array x, + double diffstep, + minnlcstate& state, + const xparams _params = alglib::xdefault); +void alglib::minnlccreatef( + ae_int_t n, + real_1d_array x, + double diffstep, + minnlcstate& state, + const xparams _params = alglib::xdefault); + +
    + +
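+A minimal sketch of the derivative-free setup, assuming a hypothetical target with no nonlinear constraints; the function, DiffStep and tolerances are illustrative only.
+#include "optimization.h"
+using namespace alglib;
+
+// function-only callback: no Jacobian, suitable for minnlccreatef()
+void nlcfvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
+{
+    fi[0] = x[0]*x[0] + x[1]*x[1];   // target only; no nonlinear constraints registered
+}
+
+void solve_without_gradient()
+{
+    real_1d_array x0 = "[1,1]";
+    real_1d_array x;
+    minnlcstate state;
+    minnlcreport rep;
+
+    minnlccreatef(2, x0, 1.0e-4, state);   // DiffStep=1.0E-4, as suggested above
+    minnlcsetalgoslp(state);               // choose a solver
+    minnlcsetcond(state, 1.0e-6, 0);
+    minnlcoptimize(state, nlcfvec);        // function-only variant matches minnlccreatef()
+    minnlcresults(state, x, rep);
+}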
    +
    /************************************************************************* +This function activates/deactivates verification of the user-supplied +analytic gradient/Jacobian. + +Upon activation of this option OptGuard integrity checker performs +numerical differentiation of your target function (constraints) at the +initial point (note: future versions may also perform check at the final +point) and compares numerical gradient/Jacobian with analytic one provided +by you. + +If difference is too large, an error flag is set and optimization session +continues. After optimization session is over, you can retrieve the report +which stores both gradients/Jacobians, and specific components highlighted +as suspicious by the OptGuard. + +The primary OptGuard report can be retrieved with minnlcoptguardresults(). + +IMPORTANT: gradient check is a high-overhead option which will cost you + about 3*N additional function evaluations. In many cases it may + cost as much as the rest of the optimization session. + + YOU SHOULD NOT USE IT IN THE PRODUCTION CODE UNLESS YOU WANT TO + CHECK DERIVATIVES PROVIDED BY SOME THIRD PARTY. + +NOTE: unlike previous incarnation of the gradient checking code, OptGuard + does NOT interrupt optimization even if it discovers bad gradient. + +INPUT PARAMETERS: + State - structure used to store algorithm state + TestStep - verification step used for numerical differentiation: + * TestStep=0 turns verification off + * TestStep>0 activates verification + You should carefully choose TestStep. Value which is + too large (so large that function behavior is non- + cubic at this scale) will lead to false alarms. Too + short step will result in rounding errors dominating + numerical derivative. + + You may use different step for different parameters by + means of setting scale with minnlcsetscale(). + +=== EXPLANATION ========================================================== + +In order to verify gradient algorithm performs following steps: + * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], + where X[i] is i-th component of the initial point and S[i] is a scale + of i-th parameter + * F(X) is evaluated at these trial points + * we perform one more evaluation in the middle point of the interval + * we build cubic model using function values and derivatives at trial + points and we compare its prediction with actual value in the middle + point + + -- ALGLIB -- + Copyright 15.06.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcoptguardgradient( + minnlcstate state, + double teststep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  

    + +
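+A possible way to switch the checks on before starting the session; the TestStep value is illustrative, and 'state' is assumed to be an optimizer created with an analytic Jacobian.
+#include "optimization.h"
+
+void enable_optguard(alglib::minnlcstate &state)
+{
+    // verify user-supplied Jacobian against a numerical one, TestStep=1.0E-3 (illustrative)
+    alglib::minnlcoptguardgradient(state, 1.0e-3);
+
+    // additionally monitor C0/C1 continuity during the session (level 1)
+    alglib::minnlcoptguardsmoothness(state, 1);
+}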
    +
    /************************************************************************* +Detailed results of the OptGuard integrity check for nonsmoothness test #0 + +Nonsmoothness (non-C1) test #0 studies function values (not gradient!) +obtained during line searches and monitors behavior of the directional +derivative estimate. + +This test is less powerful than test #1, but it does not depend on the +gradient values and thus it is more robust against artifacts introduced by +numerical differentiation. + +Two reports are returned: +* a "strongest" one, corresponding to line search which had highest + value of the nonsmoothness indicator +* a "longest" one, corresponding to line search which had more function + evaluations, and thus is more detailed + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* fidx - is an index of the function (0 for target function, 1 or higher + for nonlinear constraints) which is suspected of being "non-C1" +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], f[] - arrays of length CNT which store step lengths and function + values at these points; f[i] is evaluated in x0+stp[i]*d. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +========================================================================== += SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will += see where C1 continuity is violated. +========================================================================== + +INPUT PARAMETERS: + state - algorithm state + +OUTPUT PARAMETERS: + strrep - C1 test #0 "strong" report + lngrep - C1 test #0 "long" report + + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcoptguardnonc1test0results( + minnlcstate state, + optguardnonc1test0report& strrep, + optguardnonc1test0report& lngrep, + const xparams _params = alglib::xdefault); + +
    + +
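+A sketch of how the test #0 reports might be inspected after the session is over; 'state' and the printed wording are illustrative.
+#include <stdio.h>
+#include "optimization.h"
+
+void inspect_c1_test0(alglib::minnlcstate &state)
+{
+    alglib::optguardnonc1test0report strrep, lngrep;
+    alglib::minnlcoptguardnonc1test0results(state, strrep, lngrep);
+    if( strrep.positive )
+    {
+        // fidx=0 means the target itself, fidx>=1 a nonlinear constraint
+        printf("non-C1 suspected in function %d between steps %d and %d\n",
+               (int)strrep.fidx, (int)strrep.stpidxa, (int)strrep.stpidxb);
+        // strrep.stp[] / strrep.f[] give the (stp,f) pairs for plotting
+    }
+}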
    +
    /************************************************************************* +Detailed results of the OptGuard integrity check for nonsmoothness test #1 + +Nonsmoothness (non-C1) test #1 studies individual components of the +gradient computed during line search. + +When precise analytic gradient is provided this test is more powerful than +test #0 which works with function values and ignores user-provided +gradient. However, test #0 becomes more powerful when numerical +differentiation is employed (in such cases test #1 detects higher levels +of numerical noise and becomes too conservative). + +This test also tells specific components of the gradient which violate C1 +continuity, which makes it more informative than #0, which just tells that +continuity is violated. + +Two reports are returned: +* a "strongest" one, corresponding to line search which had highest + value of the nonsmoothness indicator +* a "longest" one, corresponding to line search which had more function + evaluations, and thus is more detailed + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* fidx - is an index of the function (0 for target function, 1 or higher + for nonlinear constraints) which is suspected of being "non-C1" +* vidx - is an index of the variable in [0,N) with nonsmooth derivative +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], g[] - arrays of length CNT which store step lengths and gradient + values at these points; g[i] is evaluated in x0+stp[i]*d and contains + vidx-th component of the gradient. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +========================================================================== += SHORTLY SPEAKING: build a 2D plot of (stp,f) and look at it - you will += see where C1 continuity is violated. +========================================================================== + +INPUT PARAMETERS: + state - algorithm state + +OUTPUT PARAMETERS: + strrep - C1 test #1 "strong" report + lngrep - C1 test #1 "long" report + + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcoptguardnonc1test1results( + minnlcstate state, + optguardnonc1test1report& strrep, + optguardnonc1test1report& lngrep, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Results of OptGuard integrity check, should be called after optimization +session is over. + +=== PRIMARY REPORT ======================================================= + +OptGuard performs several checks which are intended to catch common errors +in the implementation of nonlinear function/gradient: +* incorrect analytic gradient +* discontinuous (non-C0) target functions (constraints) +* nonsmooth (non-C1) target functions (constraints) + +Each of these checks is activated with appropriate function: +* minnlcoptguardgradient() for gradient verification +* minnlcoptguardsmoothness() for C0/C1 checks + +Following flags are set when these errors are suspected: +* rep.badgradsuspected, and additionally: + * rep.badgradfidx for specific function (Jacobian row) suspected + * rep.badgradvidx for specific variable (Jacobian column) suspected + * rep.badgradxbase, a point where gradient/Jacobian is tested + * rep.badgraduser, user-provided gradient/Jacobian + * rep.badgradnum, reference gradient/Jacobian obtained via numerical + differentiation +* rep.nonc0suspected, and additionally: + * rep.nonc0fidx - an index of specific function violating C0 continuity +* rep.nonc1suspected, and additionally + * rep.nonc1fidx - an index of specific function violating C1 continuity +Here function index 0 means target function, index 1 or higher denotes +nonlinear constraints. + +=== ADDITIONAL REPORTS/LOGS ============================================== + +Several different tests are performed to catch C0/C1 errors, you can find +out specific test signaled error by looking to: +* rep.nonc0test0positive, for non-C0 test #0 +* rep.nonc1test0positive, for non-C1 test #0 +* rep.nonc1test1positive, for non-C1 test #1 + +Additional information (including line search logs) can be obtained by +means of: +* minnlcoptguardnonc1test0results() +* minnlcoptguardnonc1test1results() +which return detailed error reports, specific points where discontinuities +were found, and so on. + +========================================================================== + +INPUT PARAMETERS: + state - algorithm state + +OUTPUT PARAMETERS: + rep - generic OptGuard report; more detailed reports can be + retrieved with other functions. + +NOTE: false negatives (nonsmooth problems are not identified as nonsmooth + ones) are possible although unlikely. + + The reason is that you need to make several evaluations around + nonsmoothness in order to accumulate enough information about + function curvature. Say, if you start right from the nonsmooth point, + optimizer simply won't get enough data to understand what is going + wrong before it terminates due to abrupt changes in the derivative. + It is also possible that "unlucky" step will move us to the + termination too quickly. + + Our current approach is to have less than 0.1% false negatives in + our test examples (measured with multiple restarts from random + points), and to have exactly 0% false positives. + + -- ALGLIB -- + Copyright 21.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcoptguardresults( + minnlcstate state, + optguardreport& rep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  

    + +
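+A sketch of reading the primary report once the optimization session is over; 'state' is assumed to be a finished optimizer and the output wording is illustrative.
+#include <stdio.h>
+#include "optimization.h"
+
+void check_optguard(alglib::minnlcstate &state)
+{
+    alglib::optguardreport ogrep;
+    alglib::minnlcoptguardresults(state, ogrep);
+    if( ogrep.badgradsuspected )
+        printf("suspicious gradient: function %d, variable %d\n",
+               (int)ogrep.badgradfidx, (int)ogrep.badgradvidx);
+    if( ogrep.nonc0suspected )
+        printf("target/constraint %d looks discontinuous (non-C0)\n", (int)ogrep.nonc0fidx);
+    if( ogrep.nonc1suspected )
+        printf("target/constraint %d looks nonsmooth (non-C1)\n", (int)ogrep.nonc1fidx);
+}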
    +
    /*************************************************************************
+This function activates/deactivates the nonsmoothness monitoring option of
+the OptGuard integrity checker. Smoothness monitor silently observes
+solution process and tries to detect ill-posed problems, i.e. ones with:
+a) discontinuous target function (non-C0) and/or constraints
+b) nonsmooth target function (non-C1) and/or constraints
+
+Smoothness monitoring does NOT interrupt optimization even if it suspects
+that your problem is nonsmooth. It just sets corresponding flags in the
+OptGuard report which can be retrieved after optimization is over.
+
+Smoothness monitoring is a moderate overhead option which often adds less
+than 1% to the optimizer running time. Thus, you can use it even for large
+scale problems.
+
+NOTE: OptGuard does NOT guarantee that it will always detect C0/C1
+      continuity violations.
+
+      First, minor errors are hard to catch - say, a 0.0001 difference in
+      the model values at two sides of the gap may be due to discontinuity
+      of the model - or simply because the model has changed.
+
+      Second, C1-violations are especially difficult to detect in a
+      noninvasive way. The optimizer usually performs very short steps
+      near the nonsmoothness, and differentiation usually introduces a
+      lot of numerical noise. It is hard to tell whether some tiny
+      discontinuity in the slope is due to real nonsmoothness or just due
+      to numerical noise alone.
+
+      Our top priority was to avoid false positives, so in some rare cases
+      minor errors may go unnoticed (however, in most cases they can be
+      spotted with a restart from a different initial point).
+
+INPUT PARAMETERS:
+    state   -   algorithm state
+    level   -   monitoring level:
+                * 0 - monitoring is disabled
+                * 1 - noninvasive low-overhead monitoring; function values
+                      and/or gradients are recorded, but OptGuard does not
+                      try to perform additional evaluations in order to
+                      get more information about suspicious locations.
+                      This kind of monitoring does not work well with SQP
+                      because SQP solver needs just 1-2 function evaluations
+                      per step, which is not enough for OptGuard to make
+                      any conclusions.
+
+=== EXPLANATION ==========================================================
+
+One major source of headache during optimization is the possibility of
+coding errors in the target function/constraints (or their gradients).
+Such errors most often manifest themselves as discontinuity or
+nonsmoothness of the target/constraints.
+
+Another frequent situation is when you try to optimize something involving
+lots of min() and max() operations, i.e. nonsmooth target. Although not a
+coding error, it is nonsmoothness anyway - and smooth optimizers usually
+stop right after encountering nonsmoothness, well before reaching solution.
+
+OptGuard integrity checker helps you to catch such situations: it monitors
+function values/gradients being passed to the optimizer and tries to
+detect errors. Upon discovering a suspicious pair of points it raises the
+appropriate flag (and allows you to continue optimization). When
+optimization is done, you can study the OptGuard result.
+
+  -- ALGLIB --
+     Copyright 21.11.2018 by Bochkanov Sergey
+*************************************************************************/
+
    void alglib::minnlcoptguardsmoothness( + minnlcstate state, + const xparams _params = alglib::xdefault); +void alglib::minnlcoptguardsmoothness( + minnlcstate state, + ae_int_t level, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  

    + +
    +
    /*************************************************************************
+This family of functions is used to launch iterations of nonlinear optimizer
+
+These functions accept following parameters:
+    state   -   algorithm state
+    fvec    -   callback which calculates function vector fi[]
+                at given point x
+    jac     -   callback which calculates function vector fi[]
+                and Jacobian jac at given point x
+    rep     -   optional callback which is called after each iteration
+                can be NULL
+    ptr     -   optional pointer which is passed to func/grad/hess/jac/rep
+                can be NULL
+
+
+NOTES:
+
+1. This function has two different implementations: one which uses exact
+   (analytical) user-supplied Jacobian, and one which uses only function
+   vector and numerically differentiates function in order to obtain
+   gradient.
+
+   Depending on the specific function used to create optimizer object
+   you should choose appropriate variant of MinNLCOptimize() - one which
+   accepts function AND Jacobian or one which accepts ONLY function.
+
+   Be careful to choose variant of MinNLCOptimize() which corresponds to
+   your optimization scheme! Table below lists different combinations of
+   callback (function/gradient) passed to MinNLCOptimize() and specific
+   function used to create optimizer.
+
+
+                     |         USER PASSED TO MinNLCOptimize()
+   CREATED WITH      |  function only   |  function and gradient
+   ------------------------------------------------------------
+   MinNLCCreateF()   |     works               FAILS
+   MinNLCCreate()    |     FAILS               works
+
+   Here "FAILS" denotes inappropriate combinations of optimizer creation
+   function and MinNLCOptimize() version. Attempts to use such a
+   combination will lead to an exception. Either you did not pass gradient
+   when it WAS needed or you passed gradient when it was NOT needed.
+
+  -- ALGLIB --
+     Copyright 06.06.2014 by Bochkanov Sergey
+*************************************************************************/
+
    void minnlcoptimize(minnlcstate &state, + void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), + void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault); +void minnlcoptimize(minnlcstate &state, + void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), + void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault); +
    +

    Examples:   [1]  [2]  [3]  

    + +
    +
    /************************************************************************* +This subroutine submits request for termination of running optimizer. It +should be called from user-supplied callback when user decides that it is +time to "smoothly" terminate optimization process. As result, optimizer +stops at point which was "current accepted" when termination request was +submitted and returns error code 8 (successful termination). + +INPUT PARAMETERS: + State - optimizer structure + +NOTE: after request for termination optimizer may perform several + additional calls to user-supplied callbacks. It does NOT guarantee + to stop immediately - it just guarantees that these additional calls + will be discarded later. + +NOTE: calling this function on optimizer which is NOT running will have no + effect. + +NOTE: multiple calls to this function are possible. First call is counted, + subsequent calls are silently ignored. + + -- ALGLIB -- + Copyright 08.10.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcrequesttermination( + minnlcstate state, + const xparams _params = alglib::xdefault); + +
    + +
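+One possible arrangement is to pass the optimizer state through the 'ptr' argument of minnlcoptimize() and issue the request from the rep() callback; the budget and names below are illustrative assumptions.
+#include "optimization.h"
+using namespace alglib;
+
+// illustrative rep() callback: the optimizer state is smuggled in through 'ptr'
+void stop_after_budget(const real_1d_array &x, double func, void *ptr)
+{
+    static int calls = 0;
+    minnlcstate *state = (minnlcstate*)ptr;
+    if( ++calls>=100 )                      // hypothetical budget of 100 iteration reports
+        minnlcrequesttermination(*state);   // optimizer later stops with code 8
+}
+
+// ... later, assuming 'state' and 'nlcjac' exist and minnlcsetxrep(state,true) was called:
+//     minnlcoptimize(state, nlcjac, stop_after_budget, &state);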
    +
    /************************************************************************* +This subroutine restarts algorithm from new point. +All optimization parameters (including constraints) are left unchanged. + +This function allows to solve multiple optimization problems (which +must have same number of dimensions) without object reallocation penalty. + +INPUT PARAMETERS: + State - structure previously allocated with MinNLCCreate call. + X - new starting point. + + -- ALGLIB -- + Copyright 28.11.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcrestartfrom( + minnlcstate state, + real_1d_array x, + const xparams _params = alglib::xdefault); + +
    + +
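+A sketch of re-solving with a new starting point; the helper below and its arguments are illustrative, the essential calls are minnlcrestartfrom()/minnlcoptimize()/minnlcresults().
+#include "optimization.h"
+using namespace alglib;
+
+// re-solve the same problem from a new starting point without re-creating the optimizer;
+// 'jacfn' is whatever Jacobian callback the problem was originally set up with
+void resolve_from(minnlcstate &state, const real_1d_array &newx,
+                  void (*jacfn)(const real_1d_array&, real_1d_array&, real_2d_array&, void*),
+                  real_1d_array &x, minnlcreport &rep)
+{
+    minnlcrestartfrom(state, newx);
+    minnlcoptimize(state, jacfn);
+    minnlcresults(state, x, rep);
+}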
    +
    /************************************************************************* +MinNLC results: the solution found, completion codes and additional +information. + +If you activated OptGuard integrity checking functionality and want to get +OptGuard report, it can be retrieved with: +* minnlcoptguardresults() - for a primary report about (a) suspected C0/C1 + continuity violations and (b) errors in the analytic gradient. +* minnlcoptguardnonc1test0results() - for C1 continuity violation test #0, + detailed line search log +* minnlcoptguardnonc1test1results() - for C1 continuity violation test #1, + detailed line search log + +INPUT PARAMETERS: + State - algorithm state + +OUTPUT PARAMETERS: + X - array[0..N-1], solution + Rep - optimization report, contains information about completion + code, constraint violation at the solution and so on. + + You should check rep.terminationtype in order to + distinguish successful termination from unsuccessful one: + + === FAILURE CODES === + * -8 internal integrity control detected infinite or + NAN values in function/gradient. Abnormal + termination signalled. + * -3 box constraints are infeasible. + Note: infeasibility of non-box constraints does + NOT trigger emergency completion; you have + to examine rep.bcerr/rep.lcerr/rep.nlcerr to + detect possibly inconsistent constraints. + + === SUCCESS CODES === + * 2 scaled step is no more than EpsX. + * 5 MaxIts steps were taken. + * 8 user requested algorithm termination via + minnlcrequesttermination(), last accepted point is + returned. + + More information about fields of this structure can be + found in the comments on minnlcreport datatype. + + -- ALGLIB -- + Copyright 06.06.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcresults( + minnlcstate state, + real_1d_array& x, + minnlcreport& rep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  

    + +
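+A sketch of checking the completion code; 'state' is assumed to be a finished optimizer and the formatting is illustrative.
+#include <stdio.h>
+#include "optimization.h"
+
+void report_outcome(alglib::minnlcstate &state)
+{
+    alglib::real_1d_array x;
+    alglib::minnlcreport rep;
+    alglib::minnlcresults(state, x, rep);
+    if( rep.terminationtype>0 )
+        printf("success (%d), x = %s\n", (int)rep.terminationtype, x.tostring(6).c_str());
+    else
+        printf("failure, completion code %d\n", (int)rep.terminationtype);
+    // rep.bcerr/rep.lcerr/rep.nlcerr report residual constraint violations
+}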
    +
    /************************************************************************* +NLC results + +Buffered implementation of MinNLCResults() which uses pre-allocated buffer +to store X[]. If buffer size is too small, it resizes buffer. It is +intended to be used in the inner cycles of performance critical algorithms +where array reallocation penalty is too large to be ignored. + + -- ALGLIB -- + Copyright 28.11.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcresultsbuf( + minnlcstate state, + real_1d_array& x, + minnlcreport& rep, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function tells MinNLC unit to use Augmented Lagrangian algorithm +for nonlinearly constrained optimization. This algorithm is a slight +modification of one described in "A Modified Barrier-Augmented Lagrangian +Method for Constrained Minimization (1999)" by D.GOLDFARB, R.POLYAK, +K. SCHEINBERG, I.YUZEFOVICH. + +AUL solver can be significantly faster than SQP on easy problems due to +cheaper iterations, although it needs more function evaluations. + +Augmented Lagrangian algorithm works by converting problem of minimizing +F(x) subject to equality/inequality constraints to unconstrained problem +of the form + + min[ f(x) + + + Rho*PENALTY_EQ(x) + SHIFT_EQ(x,Nu1) + + + Rho*PENALTY_INEQ(x) + SHIFT_INEQ(x,Nu2) ] + +where: +* Rho is a fixed penalization coefficient +* PENALTY_EQ(x) is a penalty term, which is used to APPROXIMATELY enforce + equality constraints +* SHIFT_EQ(x) is a special "shift" term which is used to "fine-tune" + equality constraints, greatly increasing precision +* PENALTY_INEQ(x) is a penalty term which is used to approximately enforce + inequality constraints +* SHIFT_INEQ(x) is a special "shift" term which is used to "fine-tune" + inequality constraints, greatly increasing precision +* Nu1/Nu2 are vectors of Lagrange coefficients which are fine-tuned during + outer iterations of algorithm + +This version of AUL algorithm uses preconditioner, which greatly +accelerates convergence. Because this algorithm is similar to penalty +methods, it may perform steps into infeasible area. All kinds of +constraints (boundary, linear and nonlinear ones) may be violated in +intermediate points - and in the solution. However, properly configured +AUL method is significantly better at handling constraints than barrier +and/or penalty methods. + +The very basic outline of algorithm is given below: +1) first outer iteration is performed with "default" values of Lagrange + multipliers Nu1/Nu2. Solution quality is low (candidate point can be + too far away from true solution; large violation of constraints is + possible) and is comparable with that of penalty methods. +2) subsequent outer iterations refine Lagrange multipliers and improve + quality of the solution. + +INPUT PARAMETERS: + State - structure which stores algorithm state + Rho - penalty coefficient, Rho>0: + * large enough that algorithm converges with desired + precision. Minimum value is 10*max(S'*diag(H)*S), where + S is a scale matrix (set by MinNLCSetScale) and H is a + Hessian of the function being minimized. If you can not + easily estimate Hessian norm, see our recommendations + below. + * not TOO large to prevent ill-conditioning + * for unit-scale problems (variables and Hessian have unit + magnitude), Rho=100 or Rho=1000 can be used. + * it is important to note that Rho is internally multiplied + by scaling matrix, i.e. optimum value of Rho depends on + scale of variables specified by MinNLCSetScale(). + ItsCnt - number of outer iterations: + * ItsCnt=0 means that small number of outer iterations is + automatically chosen (10 iterations in current version). + * ItsCnt=1 means that AUL algorithm performs just as usual + barrier method. + * ItsCnt>1 means that AUL algorithm performs specified + number of outer iterations + +HOW TO CHOOSE PARAMETERS + +Nonlinear optimization is a tricky area and Augmented Lagrangian algorithm +is sometimes hard to tune. Good values of Rho and ItsCnt are problem- +specific. 
In order to help you we prepared following set of +recommendations: + +* for unit-scale problems (variables and Hessian have unit magnitude), + Rho=100 or Rho=1000 can be used. + +* start from some small value of Rho and solve problem with just one + outer iteration (ItcCnt=1). In this case algorithm behaves like penalty + method. Increase Rho in 2x or 10x steps until you see that one outer + iteration returns point which is "rough approximation to solution". + + It is very important to have Rho so large that penalty term becomes + constraining i.e. modified function becomes highly convex in constrained + directions. + + From the other side, too large Rho may prevent you from converging to + the solution. You can diagnose it by studying number of inner iterations + performed by algorithm: too few (5-10 on 1000-dimensional problem) or + too many (orders of magnitude more than dimensionality) usually means + that Rho is too large. + +* with just one outer iteration you usually have low-quality solution. + Some constraints can be violated with very large margin, while other + ones (which are NOT violated in the true solution) can push final point + too far in the inner area of the feasible set. + + For example, if you have constraint x0>=0 and true solution x0=1, then + merely a presence of "x0>=0" will introduce a bias towards larger values + of x0. Say, algorithm may stop at x0=1.5 instead of 1.0. + +* after you found good Rho, you may increase number of outer iterations. + ItsCnt=10 is a good value. Subsequent outer iteration will refine values + of Lagrange multipliers. Constraints which were violated will be + enforced, inactive constraints will be dropped (corresponding multipliers + will be decreased). Ideally, you should see 10-1000x improvement in + constraint handling (constraint violation is reduced). + +* if you see that algorithm converges to vicinity of solution, but + additional outer iterations do not refine solution, it may mean that + algorithm is unstable - it wanders around true solution, but can not + approach it. Sometimes algorithm may be stabilized by increasing Rho one + more time, making it 5x or 10x larger. + +SCALING OF CONSTRAINTS [IMPORTANT] + +AUL optimizer scales variables according to scale specified by +MinNLCSetScale() function, so it can handle problems with badly scaled +variables (as long as we KNOW their scales). However, because function +being optimized is a mix of original function and constraint-dependent +penalty functions, it is important to rescale both variables AND +constraints. + +Say, if you minimize f(x)=x^2 subject to 1000000*x>=0, then you have +constraint whose scale is different from that of target function (another +example is 0.000001*x>=0). It is also possible to have constraints whose +scales are misaligned: 1000000*x0>=0, 0.000001*x1<=0. Inappropriate +scaling may ruin convergence because minimizing x^2 subject to x>=0 is NOT +same as minimizing it subject to 1000000*x>=0. + +Because we know coefficients of boundary/linear constraints, we can +automatically rescale and normalize them. However, there is no way to +automatically rescale nonlinear constraints Gi(x) and Hi(x) - they are +black boxes. + +It means that YOU are the one who is responsible for correct scaling of +nonlinear constraints Gi(x) and Hi(x). We recommend you to rescale +nonlinear constraints in such way that I-th component of dG/dX (or dH/dx) +has magnitude approximately equal to 1/S[i] (where S is a scale set by +MinNLCSetScale() function). 
+
+WHAT IF IT DOES NOT CONVERGE?
+
+It is possible that AUL algorithm fails to converge to precise values of
+Lagrange multipliers. It stops somewhere around the true solution, but the
+candidate point is still too far from the solution, and some constraints
+are violated. This kind of failure is specific to Lagrangian algorithms -
+technically, they stop at some point, but this point is not a constrained
+solution.
+
+There exist several reasons why the algorithm may fail to converge:
+a) too loose stopping criteria for inner iterations
+b) degenerate, redundant constraints
+c) target function has unconstrained extremum exactly at the boundary of
+   some constraint
+d) numerical noise in the target function
+
+In all these cases the algorithm is unstable - each outer iteration results
+in a large and almost random step which improves handling of some
+constraints, but violates other ones (ideally outer iterations should form
+a sequence of progressively decreasing steps towards the solution).
+
+The first possible reason is that too loose stopping criteria for inner
+iterations were specified. The Augmented Lagrangian algorithm solves a
+sequence of intermediate problems, and requires each of them to be solved
+with high precision. Insufficient precision results in incorrect update of
+Lagrange multipliers.
+
+Another reason is that you may have specified degenerate constraints: say,
+some constraint was repeated twice. In most cases the AUL algorithm
+gracefully handles such situations, but sometimes it may spend too much
+time figuring out subtle degeneracies in the constraint matrix.
+
+The third reason is tricky and hard to diagnose. Consider a situation when
+you minimize f=x^2 subject to constraint x>=0. The unconstrained extremum
+is located exactly at the boundary of the constrained area. In this case
+the algorithm will tend to oscillate between negative and positive x. Each
+time it stops at x<0 it "reinforces" constraint x>=0, and each time it is
+bounced to x>0 it "relaxes" the constraint (and is attracted to x<0).
+
+Such a situation sometimes happens in problems with hidden symmetries. The
+algorithm gets caught in a loop with Lagrange multipliers being
+continuously increased/decreased. Luckily, such a loop forms after at
+least three iterations, so this problem can be solved by DECREASING the
+number of outer iterations down to 1-2 and increasing the penalty
+coefficient Rho as much as possible.
+
+The final reason is numerical noise. The AUL algorithm is robust against
+moderate noise (more robust than, say, active set methods), but large
+noise may destabilize the algorithm.
+
+  -- ALGLIB --
+     Copyright 06.06.2014 by Bochkanov Sergey
+*************************************************************************/
+
    void alglib::minnlcsetalgoaul( + minnlcstate state, + double rho, + ae_int_t itscnt, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  

    + +
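+A sketch of selecting AUL for an existing optimizer; Rho and the outer iteration count are illustrative values for a roughly unit-scale problem.
+#include "optimization.h"
+
+void use_aul(alglib::minnlcstate &state)
+{
+    double rho = 1000.0;            // penalty coefficient, see the tuning advice above
+    alglib::ae_int_t outerits = 5;  // outer iterations (0 would select the default count)
+    alglib::minnlcsetalgoaul(state, rho, outerits);
+}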
    +
    /************************************************************************* +This function tells MinNLC optimizer to use SLP (Successive Linear +Programming) algorithm for nonlinearly constrained optimization. This +algorithm is a slight modification of one described in "A Linear +programming-based optimization algorithm for solving nonlinear programming +problems" (2010) by Claus Still and Tapio Westerlund. + +This solver is the slowest one in ALGLIB, it requires more target function +evaluations that SQP and AUL. However it is somewhat more robust in tricky +cases, so it can be used as a backup plan. We recommend to use this algo +when SQP/AUL do not work (does not return the solution you expect). If +trying different approach gives same results, then MAYBE something is +wrong with your optimization problem. + +Despite its name ("linear" = "first order method") this algorithm performs +steps similar to that of conjugate gradients method; internally it uses +orthogonality/conjugacy requirement for subsequent steps which makes it +closer to second order methods in terms of convergence speed. + +Convergence is proved for the following case: +* function and constraints are continuously differentiable (C1 class) +* extended Mangasarian–Fromovitz constraint qualification (EMFCQ) holds; + in the context of this algorithm EMFCQ means that one can, for any + infeasible point, find a search direction such that the constraint + infeasibilities are reduced. + +This algorithm has following nice properties: +* no parameters to tune +* no convexity requirements for target function or constraints +* initial point can be infeasible +* algorithm respects box constraints in all intermediate points (it does + not even evaluate function outside of box constrained area) +* once linear constraints are enforced, algorithm will not violate them +* no such guarantees can be provided for nonlinear constraints, but once + nonlinear constraints are enforced, algorithm will try to respect them + as much as possible +* numerical differentiation does not violate box constraints (although + general linear and nonlinear ones can be violated during differentiation) +* from our experience, this algorithm is somewhat more robust in really + difficult cases + +INPUT PARAMETERS: + State - structure which stores algorithm state + +===== TRACING SLP SOLVER ================================================= + +SLP solver supports advanced tracing capabilities. You can trace algorithm +output by specifying following trace symbols (case-insensitive) by means +of trace_file() call: +* 'SLP' - for basic trace of algorithm steps and decisions. Only + short scalars (function values and deltas) are printed. + N-dimensional quantities like search directions are NOT + printed. + It also prints OptGuard integrity checker report when + nonsmoothness of target/constraints is suspected. +* 'SLP.DETAILED'- for output of points being visited and search directions + This symbol also implicitly defines 'SLP'. You can + control output format by additionally specifying: + * nothing to output in 6-digit exponential format + * 'PREC.E15' to output in 15-digit exponential format + * 'PREC.F6' to output in 6-digit fixed-point format +* 'SLP.PROBING' - to let algorithm insert additional function evaluations + before line search in order to build human-readable + chart of the raw Lagrangian (~40 additional function + evaluations is performed for each line search). This + symbol also implicitly defines 'SLP'. 
+* 'OPTGUARD' - for report of smoothness/continuity violations in target + and/or constraints. This kind of reporting is included + in 'SLP', but it comes with lots of additional info. If + you need just smoothness monitoring, specify this + setting. + + NOTE: this tag merely directs OptGuard output to log + file. Even if you specify it, you still have to + configure OptGuard by calling minnlcoptguard...() + family of functions. + +By default trace is disabled and adds no overhead to the optimization +process. However, specifying any of the symbols adds some formatting and +output-related overhead. Specifying 'SLP.PROBING' adds even larger +overhead due to additional function evaluations being performed. + +You may specify multiple symbols by separating them with commas: +> +> alglib::trace_file("SLP,SLP.PROBING,PREC.F6", "path/to/trace.log") +> + + -- ALGLIB -- + Copyright 02.04.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetalgoslp( + minnlcstate state, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  

    + +
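+A sketch of selecting SLP and enabling the basic trace; the log path is illustrative.
+#include "optimization.h"
+
+void use_slp_with_trace(alglib::minnlcstate &state)
+{
+    alglib::minnlcsetalgoslp(state);
+    alglib::trace_file("SLP", "path/to/trace.log");   // optional trace of SLP decisions
+}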
    +
    /************************************************************************* +This function tells MinNLC optimizer to use SQP (Successive Quadratic +Programming) algorithm for nonlinearly constrained optimization. + +This algorithm needs order of magnitude (5x-10x) less function evaluations +than AUL solver, but has higher overhead because each iteration involves +solution of quadratic programming problem. + +Convergence is proved for the following case: +* function and constraints are continuously differentiable (C1 class) + +This algorithm has following nice properties: +* no parameters to tune +* no convexity requirements for target function or constraints +* initial point can be infeasible +* algorithm respects box constraints in all intermediate points (it does + not even evaluate function outside of box constrained area) +* once linear constraints are enforced, algorithm will not violate them +* no such guarantees can be provided for nonlinear constraints, but once + nonlinear constraints are enforced, algorithm will try to respect them + as much as possible +* numerical differentiation does not violate box constraints (although + general linear and nonlinear ones can be violated during differentiation) + +We recommend this algorithm as a default option for medium-scale problems +(less than thousand of variables) or problems with target function being +hard to evaluate. + +For large-scale problems or ones with very cheap target function +AUL solver can be better option. + +INPUT PARAMETERS: + State - structure which stores algorithm state + +===== INTERACTION WITH OPTGUARD ========================================== + +OptGuard integrity checker allows us to catch problems like errors in +gradients and discontinuity/nonsmoothness of the target/constraints. +Latter kind of problems can be detected by looking upon line searches +performed during optimization and searching for signs of nonsmoothness. + +The problem with SQP is that it is too good for OptGuard to work - it does +not perform line searches. It typically needs 1-2 function evaluations +per step, and it is not enough for OptGuard to detect nonsmoothness. + +So, if you suspect that your problem is nonsmooth, we recommend you to use +AUL or SLP solvers. + +===== TRACING SQP SOLVER ================================================= + +SQP solver supports advanced tracing capabilities. You can trace algorithm +output by specifying following trace symbols (case-insensitive) by means +of trace_file() call: +* 'SQP' - for basic trace of algorithm steps and decisions. Only + short scalars (function values and deltas) are printed. + N-dimensional quantities like search directions are NOT + printed. + It also prints OptGuard integrity checker report when + nonsmoothness of target/constraints is suspected. +* 'SQP.DETAILED'- for output of points being visited and search directions + This symbol also implicitly defines 'SQP'. You can + control output format by additionally specifying: + * nothing to output in 6-digit exponential format + * 'PREC.E15' to output in 15-digit exponential format + * 'PREC.F6' to output in 6-digit fixed-point format +* 'SQP.PROBING' - to let algorithm insert additional function evaluations + before line search in order to build human-readable + chart of the raw Lagrangian (~40 additional function + evaluations is performed for each line search). This + symbol also implicitly defines 'SQP'. + +By default trace is disabled and adds no overhead to the optimization +process. 
However, specifying any of the symbols adds some formatting and +output-related overhead. Specifying 'SQP.PROBING' adds even larger +overhead due to additional function evaluations being performed. + +You may specify multiple symbols by separating them with commas: +> +> alglib::trace_file("SQP,SQP.PROBING,PREC.F6", "path/to/trace.log") +> + + -- ALGLIB -- + Copyright 02.12.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetalgosqp( + minnlcstate state, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function sets boundary constraints for NLC optimizer. + +Boundary constraints are inactive by default (after initial creation). +They are preserved after algorithm restart with MinNLCRestartFrom(). + +You may combine boundary constraints with general linear ones - and with +nonlinear ones! Boundary constraints are handled more efficiently than +other types. Thus, if your problem has mixed constraints, you may +explicitly specify some of them as boundary and save some time/space. + +INPUT PARAMETERS: + State - structure stores algorithm state + BndL - lower bounds, array[N]. + If some (all) variables are unbounded, you may specify + very small number or -INF. + BndU - upper bounds, array[N]. + If some (all) variables are unbounded, you may specify + very large number or +INF. + +NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th +variable will be "frozen" at X[i]=BndL[i]=BndU[i]. + +NOTE 2: when you solve your problem with augmented Lagrangian solver, + boundary constraints are satisfied only approximately! It is + possible that algorithm will evaluate function outside of + feasible area! + + -- ALGLIB -- + Copyright 06.06.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetbc( + minnlcstate state, + real_1d_array bndl, + real_1d_array bndu, + const xparams _params = alglib::xdefault); + +
    + +
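+A sketch for a hypothetical 3-variable problem; the bounds are illustrative and the third variable is frozen by setting equal bounds.
+#include "optimization.h"
+
+void set_boxes(alglib::minnlcstate &state)
+{
+    alglib::real_1d_array bndl = "[0.0,-10.0,2.0]";
+    alglib::real_1d_array bndu = "[1.0, 10.0,2.0]";   // x2 is frozen: BndL[2]=BndU[2]
+    alglib::minnlcsetbc(state, bndl, bndu);
+}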
    +
    /*************************************************************************
+This function sets stopping conditions for inner iterations of the optimizer.
+
+INPUT PARAMETERS:
+    State   -   structure which stores algorithm state
+    EpsX    -   >=0
+                The subroutine finishes its work if on k+1-th iteration
+                the condition |v|<=EpsX is fulfilled, where:
+                * |.| means Euclidean norm
+                * v - scaled step vector, v[i]=dx[i]/s[i]
+                * dx - step vector, dx=X(k+1)-X(k)
+                * s - scaling coefficients set by MinNLCSetScale()
+    MaxIts  -   maximum number of iterations. If MaxIts=0, the number of
+                iterations is unlimited.
+
+Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic
+selection of the stopping condition.
+
+  -- ALGLIB --
+     Copyright 06.06.2014 by Bochkanov Sergey
+*************************************************************************/
+
    void alglib::minnlcsetcond( + minnlcstate state, + double epsx, + ae_int_t maxits, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  

    + +
    +
    /************************************************************************* +This function sets linear constraints for MinNLC optimizer. + +Linear constraints are inactive by default (after initial creation). They +are preserved after algorithm restart with MinNLCRestartFrom(). + +You may combine linear constraints with boundary ones - and with nonlinear +ones! If your problem has mixed constraints, you may explicitly specify +some of them as linear. It may help optimizer to handle them more +efficiently. + +INPUT PARAMETERS: + State - structure previously allocated with MinNLCCreate call. + C - linear constraints, array[K,N+1]. + Each row of C represents one constraint, either equality + or inequality (see below): + * first N elements correspond to coefficients, + * last element corresponds to the right part. + All elements of C (including right part) must be finite. + CT - type of constraints, array[K]: + * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] + * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] + * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] + K - number of equality/inequality constraints, K>=0: + * if given, only leading K elements of C/CT are used + * if not given, automatically determined from sizes of C/CT + +NOTE 1: when you solve your problem with augmented Lagrangian solver, + linear constraints are satisfied only approximately! It is + possible that algorithm will evaluate function outside of + feasible area! + + -- ALGLIB -- + Copyright 06.06.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetlc( + minnlcstate state, + real_2d_array c, + integer_1d_array ct, + const xparams _params = alglib::xdefault); +void alglib::minnlcsetlc( + minnlcstate state, + real_2d_array c, + integer_1d_array ct, + ae_int_t k, + const xparams _params = alglib::xdefault); + +
    + +
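+A sketch for a hypothetical 2-variable problem with one linear inequality and one linear equality; the coefficients are illustrative.
+#include "optimization.h"
+
+// two illustrative linear constraints:
+//     x0 + x1 <= 1   (CT[0]=-1),     x0 - x1 = 0   (CT[1]=0)
+void set_linear(alglib::minnlcstate &state)
+{
+    alglib::real_2d_array c = "[[1,1,1],[1,-1,0]]";   // last column: right-hand side
+    alglib::integer_1d_array ct = "[-1,0]";
+    alglib::minnlcsetlc(state, c, ct);
+}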
    +
    /*************************************************************************
+This function sets nonlinear constraints for MinNLC optimizer.
+
+In fact, this function sets NUMBER of nonlinear constraints. Constraints
+themselves (constraint functions) are passed to the MinNLCOptimize()
+method. This method requires user-defined vector function F[] and its
+Jacobian J[], where:
+* first component of F[] and first row of Jacobian J[] corresponds to
+  function being minimized
+* next NLEC components of F[] (and rows of J) correspond to nonlinear
+  equality constraints G_i(x)=0
+* next NLIC components of F[] (and rows of J) correspond to nonlinear
+  inequality constraints H_i(x)<=0
+
+NOTE: you may combine nonlinear constraints with linear/boundary ones. If
+      your problem has mixed constraints, you may explicitly specify some
+      of them as linear ones. It may help the optimizer to handle them
+      more efficiently.
+
+INPUT PARAMETERS:
+    State   -   structure previously allocated with MinNLCCreate call.
+    NLEC    -   number of Non-Linear Equality Constraints (NLEC), >=0
+    NLIC    -   number of Non-Linear Inequality Constraints (NLIC), >=0
+
+NOTE 1: when you solve your problem with augmented Lagrangian solver,
+        nonlinear constraints are satisfied only approximately! It is
+        possible that algorithm will evaluate function outside of
+        feasible area!
+
+NOTE 2: algorithm scales variables according to scale specified by
+        MinNLCSetScale() function, so it can handle problems with badly
+        scaled variables (as long as we KNOW their scales).
+
+        However, there is no way to automatically scale nonlinear
+        constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may
+        ruin convergence. Solving a problem with constraint "1000*G0(x)=0"
+        is NOT the same as solving it with constraint "0.001*G0(x)=0".
+
+        It means that YOU are the one who is responsible for correct
+        scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you
+        to scale nonlinear constraints in such a way that the I-th
+        component of dG/dX (or dH/dx) has approximately unit magnitude
+        (for problems with unit scale) or has magnitude approximately
+        equal to 1/S[i] (where S is a scale set by MinNLCSetScale()
+        function).
+
+
+  -- ALGLIB --
+     Copyright 06.06.2014 by Bochkanov Sergey
+*************************************************************************/
+
    void alglib::minnlcsetnlc( + minnlcstate state, + ae_int_t nlec, + ae_int_t nlic, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  

    + +
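+A sketch of the callback layout implied by minnlcsetnlc(state,1,1); the target and constraints below are illustrative, only the ordering of fi[] components and jac rows matters.
+#include "optimization.h"
+using namespace alglib;
+
+// layout after minnlcsetnlc(state, 1, 1): one equality G(x)=0, one inequality H(x)<=0
+void fjac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
+{
+    fi[0] = x[0]+x[1];                   // target                        (row 0 of jac)
+    fi[1] = x[0]*x[0]+x[1]*x[1]-1.0;     // equality constraint  G(x)=0   (row 1 of jac)
+    fi[2] = x[0]-0.5;                    // inequality constraint H(x)<=0 (row 2 of jac)
+    jac[0][0] = 1.0;    jac[0][1] = 1.0;
+    jac[1][0] = 2*x[0]; jac[1][1] = 2*x[1];
+    jac[2][0] = 1.0;    jac[2][1] = 0.0;
+}
+
+// ... and the matching registration, assuming 'state' was created for N=2:
+//     minnlcsetnlc(state, 1, 1);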
    +
    /************************************************************************* +This function sets preconditioner to "exact low rank" mode. + +Preconditioning is very important for convergence of Augmented Lagrangian +algorithm because presence of penalty term makes problem ill-conditioned. +Difference between performance of preconditioned and unpreconditioned +methods can be as large as 100x! + +MinNLC optimizer may use following preconditioners, each with its own +benefits and drawbacks: + a) inexact LBFGS-based, with O(N*K) evaluation time + b) exact low rank one, with O(N*K^2) evaluation time + c) exact robust one, with O(N^3+K*N^2) evaluation time +where K is a total number of general linear and nonlinear constraints (box +ones are not counted). + +It also provides special unpreconditioned mode of operation which can be +used for test purposes. Comments below discuss low rank preconditioner. + +Exact low-rank preconditioner uses Woodbury matrix identity to build +quadratic model of the penalized function. It has following features: +* no special assumptions about orthogonality of constraints +* preconditioner evaluation is optimized for K<<N. Its cost is O(N*K^2), + so it may become prohibitively slow for K>=N. +* finally, stability of the process is guaranteed only for K<<N. Woodbury + update often fail for K>=N due to degeneracy of intermediate matrices. + That's why we recommend to use "exact robust" preconditioner for such + cases. + +RECOMMENDATIONS + +We recommend to choose between "exact low rank" and "exact robust" +preconditioners, with "low rank" version being chosen when you know in +advance that total count of non-box constraints won't exceed N, and "robust" +version being chosen when you need bulletproof solution. + +INPUT PARAMETERS: + State - structure stores algorithm state + UpdateFreq- update frequency. Preconditioner is rebuilt after every + UpdateFreq iterations. Recommended value: 10 or higher. + Zero value means that good default value will be used. + + -- ALGLIB -- + Copyright 26.09.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetprecexactlowrank( + minnlcstate state, + ae_int_t updatefreq, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  

    + +
    +
    /************************************************************************* +This function sets preconditioner to "exact robust" mode. + +Preconditioning is very important for convergence of Augmented Lagrangian +algorithm because presence of penalty term makes problem ill-conditioned. +Difference between performance of preconditioned and unpreconditioned +methods can be as large as 100x! + +MinNLC optimizer may use following preconditioners, each with its own +benefits and drawbacks: + a) inexact LBFGS-based, with O(N*K) evaluation time + b) exact low rank one, with O(N*K^2) evaluation time + c) exact robust one, with O(N^3+K*N^2) evaluation time +where K is a total number of general linear and nonlinear constraints (box +ones are not counted). + +It also provides special unpreconditioned mode of operation which can be +used for test purposes. Comments below discuss robust preconditioner. + +Exact robust preconditioner uses Cholesky decomposition to invert +approximate Hessian matrix H=D+W'*C*W (where D stands for diagonal terms +of Hessian, combined result of initial scaling matrix and penalty from box +constraints; W stands for general linear constraints and linearization of +nonlinear ones; C stands for diagonal matrix of penalty coefficients). + +This preconditioner has following features: +* no special assumptions about constraint structure +* preconditioner is optimized for stability; unlike "exact low rank" + version which fails for K>=N, this one works well for any value of K. +* the only drawback is that is takes O(N^3+K*N^2) time to build it. No + economical Woodbury update is applied even when it makes sense, thus + there are exist situations (K<<N) when "exact low rank" preconditioner + outperforms this one. + +RECOMMENDATIONS + +We recommend to choose between "exact low rank" and "exact robust" +preconditioners, with "low rank" version being chosen when you know in +advance that total count of non-box constraints won't exceed N, and "robust" +version being chosen when you need bulletproof solution. + +INPUT PARAMETERS: + State - structure stores algorithm state + UpdateFreq- update frequency. Preconditioner is rebuilt after every + UpdateFreq iterations. Recommended value: 10 or higher. + Zero value means that good default value will be used. + + -- ALGLIB -- + Copyright 26.09.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetprecexactrobust( + minnlcstate state, + ae_int_t updatefreq, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function sets preconditioner to "inexact LBFGS-based" mode. + +Preconditioning is very important for convergence of Augmented Lagrangian +algorithm because presence of penalty term makes problem ill-conditioned. +Difference between performance of preconditioned and unpreconditioned +methods can be as large as 100x! + +MinNLC optimizer may use following preconditioners, each with its own +benefits and drawbacks: + a) inexact LBFGS-based, with O(N*K) evaluation time + b) exact low rank one, with O(N*K^2) evaluation time + c) exact robust one, with O(N^3+K*N^2) evaluation time +where K is a total number of general linear and nonlinear constraints (box +ones are not counted). + +Inexact LBFGS-based preconditioner uses L-BFGS formula combined with +orthogonality assumption to perform very fast updates. For a N-dimensional +problem with K general linear or nonlinear constraints (boundary ones are +not counted) it has O(N*K) cost per iteration. This preconditioner has +best quality (less iterations) when general linear and nonlinear +constraints are orthogonal to each other (orthogonality with respect to +boundary constraints is not required). Number of iterations increases when +constraints are non-orthogonal, because algorithm assumes orthogonality, +but still it is better than no preconditioner at all. + +INPUT PARAMETERS: + State - structure stores algorithm state + + -- ALGLIB -- + Copyright 26.09.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetprecinexact( + minnlcstate state, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function sets preconditioner to "turned off" mode. + +Preconditioning is very important for convergence of Augmented Lagrangian +algorithm because presence of penalty term makes problem ill-conditioned. +Difference between performance of preconditioned and unpreconditioned +methods can be as large as 100x! + +MinNLC optimizer may utilize two preconditioners, each with its own +benefits and drawbacks: a) inexact LBFGS-based, and b) exact low rank one. +It also provides special unpreconditioned mode of operation which can be +used for test purposes. + +This function activates this test mode. Do not use it in production code +to solve real-life problems. + +INPUT PARAMETERS: + State - structure stores algorithm state + + -- ALGLIB -- + Copyright 26.09.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetprecnone( + minnlcstate state, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function sets scaling coefficients for NLC optimizer. + +ALGLIB optimizers use scaling matrices to test stopping conditions (step +size and gradient are scaled before comparison with tolerances). Scale of +the I-th variable is a translation invariant measure of: +a) "how large" the variable is +b) how large the step should be to make significant changes in the function + +Scaling is also used by finite difference variant of the optimizer - step +along I-th axis is equal to DiffStep*S[I]. + +INPUT PARAMETERS: + State - structure stores algorithm state + S - array[N], non-zero scaling coefficients + S[i] may be negative, sign doesn't matter. + + -- ALGLIB -- + Copyright 06.06.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetscale( + minnlcstate state, + real_1d_array s, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  

    + +
    +
    /************************************************************************* +This function sets maximum step length (after scaling of step vector with +respect to variable scales specified by minnlcsetscale() call). + +INPUT PARAMETERS: + State - structure which stores algorithm state + StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if + you don't want to limit step length. + +Use this subroutine when you optimize target function which contains exp() +or other fast growing functions, and optimization algorithm makes too +large steps which leads to overflow. This function allows us to reject +steps that are too large (and therefore expose us to the possible +overflow) without actually calculating function value at the x+stp*d. + +NOTE: different solvers employed by MinNLC optimizer use different norms + for step; AUL solver uses 2-norm, whilst SLP solver uses INF-norm. + + -- ALGLIB -- + Copyright 02.04.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetstpmax( + minnlcstate state, + double stpmax, + const xparams _params = alglib::xdefault); + +
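A hedged sketch of the typical use, mirroring the third complete example below where the target involves exp(); the bound 10.0 is an arbitrary illustrative value:

    // Illustrative fragment: cap the scaled step length so the solver never
    // probes points where exp() would overflow. `state` is assumed to have
    // been prepared with minnlccreate()/minnlcsetscale() as usual.
    alglib::minnlcsetstpmax(state, 10.0);
    // minnlcsetstpmax(state, 0.0) would restore the default (unlimited step).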
    + +
    +
    /************************************************************************* +This function turns on/off reporting. + +INPUT PARAMETERS: + State - structure which stores algorithm state + NeedXRep- whether iteration reports are needed or not + +If NeedXRep is True, algorithm will call rep() callback function if it is +provided to MinNLCOptimize(). + +NOTE: algorithm passes two parameters to rep() callback - current point + and penalized function value at current point. Important - function + value which is returned is NOT function being minimized. It is sum + of the value of the function being minimized - and penalty term. + + -- ALGLIB -- + Copyright 28.11.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnlcsetxrep( + minnlcstate state, + bool needxrep, + const xparams _params = alglib::xdefault); + +
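A sketch of wiring a progress callback through minnlcoptimize(); the callback body is illustrative, and note (per the text above) that the reported value includes the penalty term:

    // Illustrative fragment: per-iteration reporting for MinNLC.
    // (assumes <stdio.h>, as in the complete examples below)
    void my_nlc_rep(const alglib::real_1d_array &x, double func, void *ptr)
    {
        // `func` is the PENALIZED value, not the raw target being minimized
        printf("x0=%.6f  penalized f=%.6f\n", x[0], func);
    }
    // ... after the usual setup (see the complete examples below):
    // minnlcsetxrep(state, true);
    // minnlcoptimize(state, nlcfunc1_jac, my_nlc_rep, NULL);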
    + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  nlcfunc1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +{
    +    //
    +    // this callback calculates
    +    //
    +    //     f0(x0,x1) = -x0+x1
    +    //     f1(x0,x1) = x0^2+x1^2-1
    +    //
    +    // and Jacobian matrix J = [dfi/dxj]
    +    //
    +    fi[0] = -x[0]+x[1];
    +    fi[1] = x[0]*x[0] + x[1]*x[1] - 1.0;
    +    jac[0][0] = -1.0;
    +    jac[0][1] = +1.0;
    +    jac[1][0] = 2*x[0];
    +    jac[1][1] = 2*x[1];
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of
    +    //
    +    //     f(x0,x1) = -x0+x1
    +    //
    +    // subject to nonlinear equality constraint
    +    //
    +    //    x0^2 + x1^2 - 1 = 0
    +    //
    +    real_1d_array x0 = "[0,0]";
    +    real_1d_array s = "[1,1]";
    +    double epsx = 0.000001;
    +    ae_int_t maxits = 0;
    +    minnlcstate state;
    +
    +    //
    +    // Create optimizer object and tune its settings:
    +    // * epsx=0.000001  stopping condition for inner iterations
    +    // * s=[1,1]        all variables have unit scale
    +    //
    +    minnlccreate(2, x0, state);
    +    minnlcsetcond(state, epsx, maxits);
    +    minnlcsetscale(state, s);
    +
    +    //
    +    // Choose one of the nonlinear programming solvers supported by minnlc
    +    // optimizer:
    +    // * SLP - successive linear programming NLP solver
    +    // * AUL - augmented Lagrangian NLP solver
    +    //
    +    // Different solvers have different properties:
    +    // * SLP is the most robust solver provided by ALGLIB: it can solve both
    +    //   convex and nonconvex optimization problems, it respects box and
    +    //   linear constraints (after you find feasible point it won't move away
    +    //   from the feasible area) and tries to respect nonlinear constraints
+    //   as much as possible. It also usually needs fewer function evaluations
+    //   to converge than AUL.
+    //   However, it solves an LP subproblem at each iteration, which adds
+    //   significant overhead to its running time. Sometimes it can be as much
+    //   as 7x slower than AUL.
    +    // * AUL solver is less robust than SLP - it can violate box and linear
    +    //   constraints at any moment, and it is intended for convex optimization
    +    //   problems (although in many cases it can deal with nonconvex ones too).
    +    //   Also, unlike SLP it needs some tuning (penalty factor and number of
    +    //   outer iterations).
    +    //   However, it is often much faster than the current version of SLP.
    +    //
    +    // In the code below we set solver to be AUL but then override it with SLP,
    +    // so the effective choice is to use SLP. We recommend you to use SLP at
    +    // least for early prototyping stages.
    +    //
    +    // You can comment out line with SLP if you want to solve your problem with
    +    // AUL solver.
    +    //
    +    double rho = 1000.0;
    +    ae_int_t outerits = 5;
    +    minnlcsetalgoaul(state, rho, outerits);
    +    minnlcsetalgoslp(state);
    +
    +    //
    +    // Set constraints:
    +    //
    +    // Nonlinear constraints are tricky - you can not "pack" general
    +    // nonlinear function into double precision array. That's why
    +    // minnlcsetnlc() does not accept constraints itself - only constraint
    +    // counts are passed: first parameter is number of equality constraints,
    +    // second one is number of inequality constraints.
    +    //
    +    // As for constraining functions - these functions are passed as part
    +    // of problem Jacobian (see below).
    +    //
    +    // NOTE: MinNLC optimizer supports arbitrary combination of boundary, general
    +    //       linear and general nonlinear constraints. This example does not
    +    //       show how to work with general linear constraints, but you can
    +    //       easily find it in documentation on minnlcsetbc() and
    +    //       minnlcsetlc() functions.
    +    //
    +    minnlcsetnlc(state, 1, 0);
    +
    +    //
    +    // Activate OptGuard integrity checking.
    +    //
    +    // OptGuard monitor helps to catch common coding and problem statement
    +    // issues, like:
    +    // * discontinuity of the target/constraints (C0 continuity violation)
    +    // * nonsmoothness of the target/constraints (C1 continuity violation)
    +    // * erroneous analytic Jacobian, i.e. one inconsistent with actual
    +    //   change in the target/constraints
    +    //
    +    // OptGuard is essential for early prototyping stages because such
    +    // problems often result in premature termination of the optimizer
    +    // which is really hard to distinguish from the correct termination.
    +    //
    +    // IMPORTANT: GRADIENT VERIFICATION IS PERFORMED BY MEANS OF NUMERICAL
    +    //            DIFFERENTIATION, THUS DO NOT USE IT IN PRODUCTION CODE!
    +    //
    +    //            Other OptGuard checks add moderate overhead, but anyway
    +    //            it is better to turn them off when they are not needed.
    +    //
    +    minnlcoptguardsmoothness(state);
    +    minnlcoptguardgradient(state, 0.001);
    +
    +    //
    +    // Optimize and test results.
    +    //
    +    // Optimizer object accepts vector function and its Jacobian, with first
    +    // component (Jacobian row) being target function, and next components
    +    // (Jacobian rows) being nonlinear equality and inequality constraints.
    +    //
    +    // So, our vector function has form
    +    //
    +    //     {f0,f1} = { -x0+x1 , x0^2+x1^2-1 }
    +    //
    +    // with Jacobian
    +    //
    +    //         [  -1    +1  ]
    +    //     J = [            ]
    +    //         [ 2*x0  2*x1 ]
    +    //
    +    // with f0 being target function, f1 being constraining function. Number
    +    // of equality/inequality constraints is specified by minnlcsetnlc(),
    +    // with equality ones always being first, inequality ones being last.
    +    //
    +    minnlcreport rep;
    +    real_1d_array x1;
    +    alglib::minnlcoptimize(state, nlcfunc1_jac);
    +    minnlcresults(state, x1, rep);
    +    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [0.70710,-0.70710]
    +
    +    //
    +    // Check that OptGuard did not report errors
    +    //
    +    // NOTE: want to test OptGuard? Try breaking the Jacobian - say, add
    +    //       1.0 to some of its components.
    +    //
    +    optguardreport ogrep;
    +    minnlcoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  nlcfunc1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +{
    +    //
    +    // this callback calculates
    +    //
    +    //     f0(x0,x1) = -x0+x1
    +    //     f1(x0,x1) = x0^2+x1^2-1
    +    //
    +    // and Jacobian matrix J = [dfi/dxj]
    +    //
    +    fi[0] = -x[0]+x[1];
    +    fi[1] = x[0]*x[0] + x[1]*x[1] - 1.0;
    +    jac[0][0] = -1.0;
    +    jac[0][1] = +1.0;
    +    jac[1][0] = 2*x[0];
    +    jac[1][1] = 2*x[1];
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of
    +    //
    +    //     f(x0,x1) = -x0+x1
    +    //
    +    // subject to box constraints
    +    //
    +    //    x0>=0, x1>=0
    +    //
    +    // and nonlinear inequality constraint
    +    //
    +    //    x0^2 + x1^2 - 1 <= 0
    +    //
    +    real_1d_array x0 = "[0,0]";
    +    real_1d_array s = "[1,1]";
    +    double epsx = 0.000001;
    +    ae_int_t maxits = 0;
    +    real_1d_array bndl = "[0,0]";
    +    real_1d_array bndu = "[+inf,+inf]";
    +    minnlcstate state;
    +
    +    //
    +    // Create optimizer object and tune its settings:
    +    // * epsx=0.000001  stopping condition for inner iterations
    +    // * s=[1,1]        all variables have unit scale; it is important to
    +    //                  tell optimizer about scales of your variables - it
    +    //                  greatly accelerates convergence and helps to perform
    +    //                  some important integrity checks.
    +    //
    +    minnlccreate(2, x0, state);
    +    minnlcsetcond(state, epsx, maxits);
    +    minnlcsetscale(state, s);
    +
    +    //
    +    // Choose one of the nonlinear programming solvers supported by minnlc
    +    // optimizer:
    +    // * SQP - sequential quadratic programming NLP solver
    +    // * AUL - augmented Lagrangian NLP solver
    +    // * SLP - successive linear programming NLP solver
    +    //
    +    // Different solvers have different properties:
+    // * SQP needs fewer function evaluations than any other solver, but it
+    //   has much higher iteration cost than other solvers (a QP subproblem
+    //   has to be solved during each step)
+    // * AUL solver has cheaper iterations, but needs more target function
+    //   evaluations
+    // * SLP is the most robust solver provided by ALGLIB, but it performs an
+    //   order of magnitude more iterations than SQP.
+    //
+    // In the code below we set the solver to AUL, but then override it with
+    // SLP, and then with SQP, so the effective choice is to use SQP. We
+    // recommend using SQP at least for early prototyping stages, and then
+    // switching to AUL if possible.
    +    //
    +    double rho = 1000.0;
    +    ae_int_t outerits = 5;
    +    minnlcsetalgoaul(state, rho, outerits);
    +    minnlcsetalgoslp(state);
    +    minnlcsetalgosqp(state);
    +
    +    //
    +    // Set constraints:
    +    //
    +    // 1. boundary constraints are passed with minnlcsetbc() call
    +    //
    +    // 2. nonlinear constraints are more tricky - you can not "pack" general
    +    //    nonlinear function into double precision array. That's why
    +    //    minnlcsetnlc() does not accept constraints itself - only constraint
    +    //    counts are passed: first parameter is number of equality constraints,
    +    //    second one is number of inequality constraints.
    +    //
    +    //    As for constraining functions - these functions are passed as part
    +    //    of problem Jacobian (see below).
    +    //
    +    // NOTE: MinNLC optimizer supports arbitrary combination of boundary, general
    +    //       linear and general nonlinear constraints. This example does not
    +    //       show how to work with general linear constraints, but you can
    +    //       easily find it in documentation on minnlcsetlc() function.
    +    //
    +    minnlcsetbc(state, bndl, bndu);
    +    minnlcsetnlc(state, 0, 1);
    +
    +    //
    +    // Activate OptGuard integrity checking.
    +    //
    +    // OptGuard monitor helps to catch common coding and problem statement
    +    // issues, like:
    +    // * discontinuity of the target/constraints (C0 continuity violation)
    +    // * nonsmoothness of the target/constraints (C1 continuity violation)
    +    // * erroneous analytic Jacobian, i.e. one inconsistent with actual
    +    //   change in the target/constraints
    +    //
    +    // OptGuard is essential for early prototyping stages because such
    +    // problems often result in premature termination of the optimizer
    +    // which is really hard to distinguish from the correct termination.
    +    //
    +    // IMPORTANT: GRADIENT VERIFICATION IS PERFORMED BY MEANS OF NUMERICAL
    +    //            DIFFERENTIATION, THUS DO NOT USE IT IN PRODUCTION CODE!
    +    //
    +    //            Other OptGuard checks add moderate overhead, but anyway
    +    //            it is better to turn them off when they are not needed.
    +    //
    +    minnlcoptguardsmoothness(state);
    +    minnlcoptguardgradient(state, 0.001);
    +
    +    //
    +    // Optimize and test results.
    +    //
    +    // Optimizer object accepts vector function and its Jacobian, with first
    +    // component (Jacobian row) being target function, and next components
    +    // (Jacobian rows) being nonlinear equality and inequality constraints.
    +    //
    +    // So, our vector function has form
    +    //
    +    //     {f0,f1} = { -x0+x1 , x0^2+x1^2-1 }
    +    //
    +    // with Jacobian
    +    //
    +    //         [  -1    +1  ]
    +    //     J = [            ]
    +    //         [ 2*x0  2*x1 ]
    +    //
    +    // with f0 being target function, f1 being constraining function. Number
    +    // of equality/inequality constraints is specified by minnlcsetnlc(),
    +    // with equality ones always being first, inequality ones being last.
    +    //
    +    minnlcreport rep;
    +    real_1d_array x1;
    +    alglib::minnlcoptimize(state, nlcfunc1_jac);
    +    minnlcresults(state, x1, rep);
    +    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [1.0000,0.0000]
    +
    +    //
    +    // Check that OptGuard did not report errors
    +    //
    +    // NOTE: want to test OptGuard? Try breaking the Jacobian - say, add
    +    //       1.0 to some of its components.
    +    //
    +    optguardreport ogrep;
    +    minnlcoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  nlcfunc2_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +{
    +    //
    +    // this callback calculates
    +    //
    +    //     f0(x0,x1,x2) = x0+x1
    +    //     f1(x0,x1,x2) = x2-exp(x0)
    +    //     f2(x0,x1,x2) = x0^2+x1^2-1
    +    //
    +    // and Jacobian matrix J = [dfi/dxj]
    +    //
    +    fi[0] = x[0]+x[1];
    +    fi[1] = x[2]-exp(x[0]);
    +    fi[2] = x[0]*x[0] + x[1]*x[1] - 1.0;
    +    jac[0][0] = 1.0;
    +    jac[0][1] = 1.0;
    +    jac[0][2] = 0.0;
    +    jac[1][0] = -exp(x[0]);
    +    jac[1][1] = 0.0;
    +    jac[1][2] = 1.0;
    +    jac[2][0] = 2*x[0];
    +    jac[2][1] = 2*x[1];
    +    jac[2][2] = 0.0;
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of
    +    //
    +    //     f(x0,x1) = x0+x1
    +    //
    +    // subject to nonlinear inequality constraint
    +    //
    +    //    x0^2 + x1^2 - 1 <= 0
    +    //
    +    // and nonlinear equality constraint
    +    //
    +    //    x2-exp(x0) = 0
    +    //
    +    real_1d_array x0 = "[0,0,0]";
    +    real_1d_array s = "[1,1,1]";
    +    double epsx = 0.000001;
    +    ae_int_t maxits = 0;
    +    minnlcstate state;
    +    minnlcreport rep;
    +    real_1d_array x1;
    +
    +    //
    +    // Create optimizer object and tune its settings:
    +    // * epsx=0.000001  stopping condition for inner iterations
+    // * s=[1,1,1]      all variables have unit scale
    +    // * upper limit on step length is specified (to avoid probing locations where exp() is large)
    +    //
    +    minnlccreate(3, x0, state);
    +    minnlcsetcond(state, epsx, maxits);
    +    minnlcsetscale(state, s);
    +    minnlcsetstpmax(state, 10.0);
    +
    +    //
    +    // Choose one of the nonlinear programming solvers supported by minnlc
    +    // optimizer:
    +    // * SLP - successive linear programming NLP solver
    +    // * AUL - augmented Lagrangian NLP solver
    +    //
    +    // Different solvers have different properties:
    +    // * SLP is the most robust solver provided by ALGLIB: it can solve both
    +    //   convex and nonconvex optimization problems, it respects box and
    +    //   linear constraints (after you find feasible point it won't move away
    +    //   from the feasible area) and tries to respect nonlinear constraints
+    //   as much as possible. It also usually needs fewer function evaluations
+    //   to converge than AUL.
+    //   However, it solves an LP subproblem at each iteration, which adds
+    //   significant overhead to its running time. Sometimes it can be as much
+    //   as 7x slower than AUL.
    +    // * AUL solver is less robust than SLP - it can violate box and linear
    +    //   constraints at any moment, and it is intended for convex optimization
    +    //   problems (although in many cases it can deal with nonconvex ones too).
    +    //   Also, unlike SLP it needs some tuning (penalty factor and number of
    +    //   outer iterations).
    +    //   However, it is often much faster than the current version of SLP.
    +    //
    +    // In the code below we set solver to be AUL but then override it with SLP,
    +    // so the effective choice is to use SLP. We recommend you to use SLP at
    +    // least for early prototyping stages.
    +    //
    +    // You can comment out line with SLP if you want to solve your problem with
    +    // AUL solver.
    +    //
    +    double rho = 1000.0;
    +    ae_int_t outerits = 5;
    +    minnlcsetalgoaul(state, rho, outerits);
    +    minnlcsetalgoslp(state);
    +
    +    //
    +    // Set constraints:
    +    //
    +    // Nonlinear constraints are tricky - you can not "pack" general
    +    // nonlinear function into double precision array. That's why
    +    // minnlcsetnlc() does not accept constraints itself - only constraint
    +    // counts are passed: first parameter is number of equality constraints,
    +    // second one is number of inequality constraints.
    +    //
    +    // As for constraining functions - these functions are passed as part
    +    // of problem Jacobian (see below).
    +    //
    +    // NOTE: MinNLC optimizer supports arbitrary combination of boundary, general
    +    //       linear and general nonlinear constraints. This example does not
    +    //       show how to work with boundary or general linear constraints, but you
    +    //       can easily find it in documentation on minnlcsetbc() and
    +    //       minnlcsetlc() functions.
    +    //
    +    minnlcsetnlc(state, 1, 1);
    +
    +    //
    +    // Activate OptGuard integrity checking.
    +    //
    +    // OptGuard monitor helps to catch common coding and problem statement
    +    // issues, like:
    +    // * discontinuity of the target/constraints (C0 continuity violation)
    +    // * nonsmoothness of the target/constraints (C1 continuity violation)
    +    // * erroneous analytic Jacobian, i.e. one inconsistent with actual
    +    //   change in the target/constraints
    +    //
    +    // OptGuard is essential for early prototyping stages because such
    +    // problems often result in premature termination of the optimizer
    +    // which is really hard to distinguish from the correct termination.
    +    //
    +    // IMPORTANT: GRADIENT VERIFICATION IS PERFORMED BY MEANS OF NUMERICAL
    +    //            DIFFERENTIATION, THUS DO NOT USE IT IN PRODUCTION CODE!
    +    //
    +    //            Other OptGuard checks add moderate overhead, but anyway
    +    //            it is better to turn them off when they are not needed.
    +    //
    +    minnlcoptguardsmoothness(state);
    +    minnlcoptguardgradient(state, 0.001);
    +
    +    //
    +    // Optimize and test results.
    +    //
    +    // Optimizer object accepts vector function and its Jacobian, with first
    +    // component (Jacobian row) being target function, and next components
    +    // (Jacobian rows) being nonlinear equality and inequality constraints.
    +    //
    +    // So, our vector function has form
    +    //
    +    //     {f0,f1,f2} = { x0+x1 , x2-exp(x0) , x0^2+x1^2-1 }
    +    //
    +    // with Jacobian
    +    //
    +    //         [  +1      +1       0 ]
    +    //     J = [-exp(x0)  0        1 ]
    +    //         [ 2*x0    2*x1      0 ]
    +    //
    +    // with f0 being target function, f1 being equality constraint "f1=0",
    +    // f2 being inequality constraint "f2<=0". Number of equality/inequality
    +    // constraints is specified by minnlcsetnlc(), with equality ones always
    +    // being first, inequality ones being last.
    +    //
    +    alglib::minnlcoptimize(state, nlcfunc2_jac);
    +    minnlcresults(state, x1, rep);
    +    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [-0.70710,-0.70710,0.49306]
    +
    +    //
    +    // Check that OptGuard did not report errors
    +    //
    +    // NOTE: want to test OptGuard? Try breaking the Jacobian - say, add
    +    //       1.0 to some of its components.
    +    //
    +    optguardreport ogrep;
    +    minnlcoptguardresults(state, ogrep);
    +    printf("%s\n", ogrep.badgradsuspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc0suspected ? "true" : "false"); // EXPECTED: false
    +    printf("%s\n", ogrep.nonc1suspected ? "true" : "false"); // EXPECTED: false
    +    return 0;
    +}
    +
    +
    +
    +
    + +minnsreport
    +minnsstate
    + +minnscreate
    +minnscreatef
    +minnsoptimize
    +minnsrequesttermination
    +minnsrestartfrom
    +minnsresults
    +minnsresultsbuf
    +minnssetalgoags
    +minnssetbc
    +minnssetcond
    +minnssetlc
    +minnssetnlc
    +minnssetscale
    +minnssetxrep
    + + + + + + +
    minns_d_bc Nonsmooth box constrained optimization
    minns_d_diff Nonsmooth unconstrained optimization with numerical differentiation
    minns_d_nlc Nonsmooth nonlinearly constrained optimization
    minns_d_unconstrained Nonsmooth unconstrained optimization
    + +
    +
/************************************************************************* +This structure stores optimization report: +* IterationsCount total number of inner iterations +* NFEV number of gradient evaluations +* TerminationType termination type (see below) +* CErr maximum violation of all types of constraints +* LCErr maximum violation of linear constraints +* NLCErr maximum violation of nonlinear constraints + +TERMINATION CODES + +TerminationType field contains completion code, which can be: + -8 internal integrity control detected infinite or NAN values in + function/gradient. Abnormal termination signalled. + -3 box constraints are inconsistent + -1 inconsistent parameters were passed: + * penalty parameter for minnssetalgoags() is zero, + but we have nonlinear constraints set by minnssetnlc() + 2 sampling radius decreased below epsx + 5 MaxIts steps were taken + 7 stopping conditions are too stringent, + further improvement is impossible, + X contains the best point found so far. + 8 User requested termination via MinNSRequestTermination() + +Other fields of this structure are not documented and should not be used! +*************************************************************************/ +
    class minnsreport +{ + ae_int_t iterationscount; + ae_int_t nfev; + double cerr; + double lcerr; + double nlcerr; + ae_int_t terminationtype; + ae_int_t varidx; + ae_int_t funcidx; +}; + +
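A small illustrative check of the report after a solve; the field names follow the listing above, the messages are invented:

    // Illustrative fragment: inspect the report filled by minnsresults().
    alglib::real_1d_array xsol;
    alglib::minnsreport rep;
    alglib::minnsresults(state, xsol, rep);   // `state` from a finished minnsoptimize() run
    if( rep.terminationtype>0 )
        printf("done: %d iterations, max constraint violation %.2e\n",
               (int)rep.iterationscount, rep.cerr);
    else
        printf("solver failed, termination code %d\n", (int)rep.terminationtype);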
    + +
    +
    /************************************************************************* +This object stores nonlinear optimizer state. +You should use functions provided by MinNS subpackage to work with this +object +*************************************************************************/ +
    class minnsstate +{ +}; + +
    + +
    +
    /************************************************************************* + NONSMOOTH NONCONVEX OPTIMIZATION + SUBJECT TO BOX/LINEAR/NONLINEAR-NONSMOOTH CONSTRAINTS + +DESCRIPTION: + +The subroutine minimizes function F(x) of N arguments subject to any +combination of: +* bound constraints +* linear inequality constraints +* linear equality constraints +* nonlinear equality constraints Gi(x)=0 +* nonlinear inequality constraints Hi(x)<=0 + +IMPORTANT: see MinNSSetAlgoAGS for important information on performance + restrictions of AGS solver. + +REQUIREMENTS: +* starting point X0 must be feasible or not too far away from the feasible + set +* F(), G(), H() are continuous, locally Lipschitz and continuously (but + not necessarily twice) differentiable in an open dense subset of R^N. + Functions F(), G() and H() may be nonsmooth and non-convex. + Informally speaking, it means that functions are composed of large + differentiable "patches" with nonsmoothness having place only at the + boundaries between these "patches". + Most real-life nonsmooth functions satisfy these requirements. Say, + anything which involves finite number of abs(), min() and max() is very + likely to pass the test. + Say, it is possible to optimize anything of the following: + * f=abs(x0)+2*abs(x1) + * f=max(x0,x1) + * f=sin(max(x0,x1)+abs(x2)) +* for nonlinearly constrained problems: F() must be bounded from below + without nonlinear constraints (this requirement is due to the fact that, + contrary to box and linear constraints, nonlinear ones require special + handling). +* user must provide function value and gradient for F(), H(), G() at all + points where function/gradient can be calculated. If optimizer requires + value exactly at the boundary between "patches" (say, at x=0 for f=abs(x)), + where gradient is not defined, user may resolve tie arbitrarily (in our + case - return +1 or -1 at its discretion). +* NS solver supports numerical differentiation, i.e. it may differentiate + your function for you, but it results in 2N increase of function + evaluations. Not recommended unless you solve really small problems. See + minnscreatef() for more information on this functionality. + +USAGE: + +1. User initializes algorithm state with MinNSCreate() call and chooses + what NLC solver to use. There is some solver which is used by default, + with default settings, but you should NOT rely on default choice. It + may change in future releases of ALGLIB without notice, and no one can + guarantee that new solver will be able to solve your problem with + default settings. + + From the other side, if you choose solver explicitly, you can be pretty + sure that it will work with new ALGLIB releases. + + In the current release following solvers can be used: + * AGS solver (activated with MinNSSetAlgoAGS() function) + +2. User adds boundary and/or linear and/or nonlinear constraints by means + of calling one of the following functions: + a) MinNSSetBC() for boundary constraints + b) MinNSSetLC() for linear constraints + c) MinNSSetNLC() for nonlinear constraints + You may combine (a), (b) and (c) in one optimization problem. + +3. User sets scale of the variables with MinNSSetScale() function. It is + VERY important to set scale of the variables, because nonlinearly + constrained problems are hard to solve when variables are badly scaled. + +4. User sets stopping conditions with MinNSSetCond(). + +5. 
Finally, user calls MinNSOptimize() function which takes algorithm + state and pointer (delegate, etc) to callback function which calculates + F/G/H. + +6. User calls MinNSResults() to get solution + +7. Optionally user may call MinNSRestartFrom() to solve another problem + with same N but another starting point. MinNSRestartFrom() allows reusing an + already initialized structure. + + +INPUT PARAMETERS: + N - problem dimension, N>0: + * if given, only leading N elements of X are used + * if not given, automatically determined from size of X + X - starting point, array[N]: + * it is better to set X to a feasible point + * but X can be infeasible, in which case algorithm will try + to find feasible point first, using X as initial + approximation. + +OUTPUT PARAMETERS: + State - structure stores algorithm state + +NOTE: minnscreatef() function may be used if you do not have analytic + gradient. This function creates solver which uses numerical + differentiation with user-specified step. + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnscreate( + real_1d_array x, + minnsstate& state, + const xparams _params = alglib::xdefault); +void alglib::minnscreate( + ae_int_t n, + real_1d_array x, + minnsstate& state, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  

    + +
    +
/************************************************************************* +Version of minnscreate() which uses numerical differentiation. I.e., you +do not have to calculate derivatives yourself. However, this version needs +2N times more function evaluations. + +A 2-point differentiation formula is used, because the more precise 4-point +formula is unstable when used on non-smooth functions. + +INPUT PARAMETERS: + N - problem dimension, N>0: + * if given, only leading N elements of X are used + * if not given, automatically determined from size of X + X - starting point, array[N]: + * it is better to set X to a feasible point + * but X can be infeasible, in which case algorithm will try + to find feasible point first, using X as initial + approximation. + DiffStep- differentiation step, DiffStep>0. Algorithm performs + numerical differentiation with step for I-th variable + being equal to DiffStep*S[I] (here S[] is a scale vector, + set by minnssetscale() function). + Do not use too small steps, because it may lead to + catastrophic cancellation during intermediate calculations. + +OUTPUT PARAMETERS: + State - structure stores algorithm state + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnscreatef( + real_1d_array x, + double diffstep, + minnsstate& state, + const xparams _params = alglib::xdefault); +void alglib::minnscreatef( + ae_int_t n, + real_1d_array x, + double diffstep, + minnsstate& state, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
/************************************************************************* +This family of functions is used to launch iterations of the nonlinear optimizer + +These functions accept following parameters: + state - algorithm state + fvec - callback which calculates function vector fi[] + at given point x + jac - callback which calculates function vector fi[] + and Jacobian jac at given point x + rep - optional callback which is called after each iteration + can be NULL + ptr - optional pointer which is passed to func/grad/hess/jac/rep + can be NULL + + +NOTES: + +1. This function has two different implementations: one which uses exact + (analytical) user-supplied Jacobian, and one which uses only function + vector and numerically differentiates function in order to obtain + gradient. + + Depending on the specific function used to create optimizer object + you should choose appropriate variant of minnsoptimize() - one which + accepts function AND Jacobian or one which accepts ONLY function. + + Be careful to choose variant of minnsoptimize() which corresponds to + your optimization scheme! Table below lists different combinations of + callback (function/gradient) passed to minnsoptimize() and specific + function used to create optimizer. + + + | USER PASSED TO minnsoptimize() + CREATED WITH | function only | function and gradient + ------------------------------------------------------------ + minnscreatef() | works FAILS + minnscreate() | FAILS works + + Here "FAILS" denotes inappropriate combinations of optimizer creation + function and minnsoptimize() version. Attempts to use such a + combination will lead to an exception. Either you did not pass gradient + when it WAS needed or you passed gradient when it was NOT needed. + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void minnsoptimize(minnsstate &state, + void (*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), + void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault); +void minnsoptimize(minnsstate &state, + void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), + void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault); +
    +

    Examples:   [1]  [2]  [3]  [4]  

    + +
    +
    /************************************************************************* +This subroutine submits request for termination of running optimizer. It +should be called from user-supplied callback when user decides that it is +time to "smoothly" terminate optimization process. As result, optimizer +stops at point which was "current accepted" when termination request was +submitted and returns error code 8 (successful termination). + +INPUT PARAMETERS: + State - optimizer structure + +NOTE: after request for termination optimizer may perform several + additional calls to user-supplied callbacks. It does NOT guarantee + to stop immediately - it just guarantees that these additional calls + will be discarded later. + +NOTE: calling this function on optimizer which is NOT running will have no + effect. + +NOTE: multiple calls to this function are possible. First call is counted, + subsequent calls are silently ignored. + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnsrequesttermination( + minnsstate state, + const xparams _params = alglib::xdefault); + +
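A sketch of the intended usage pattern: the request is issued from inside a user callback. The budget check below (time_budget_exceeded()) is a hypothetical user-side function, not part of ALGLIB:

    // Illustrative fragment: stop the running AGS solver from a report
    // callback. Requires minnssetxrep(state, true) so that rep() is called.
    void my_ns_rep(const alglib::real_1d_array &x, double func, void *ptr)
    {
        alglib::minnsstate *st = (alglib::minnsstate*)ptr;  // state passed via `ptr`
        if( time_budget_exceeded() )                        // hypothetical user-side check
            alglib::minnsrequesttermination(*st);
    }
    // ... later: alglib::minnsoptimize(state, nsfunc1_jac, my_ns_rep, &state);
    //     minnsresults() will then report terminationtype = 8.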
    + +
    +
    /************************************************************************* +This subroutine restarts algorithm from new point. +All optimization parameters (including constraints) are left unchanged. + +This function allows to solve multiple optimization problems (which +must have same number of dimensions) without object reallocation penalty. + +INPUT PARAMETERS: + State - structure previously allocated with minnscreate() call. + X - new starting point. + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnsrestartfrom( + minnsstate state, + real_1d_array x, + const xparams _params = alglib::xdefault); + +
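A sketch of solving the same problem from a second starting point without re-creating the optimizer (points are illustrative; constraints and settings from the first solve are kept):

    // Illustrative fragment: reuse a configured minnsstate for a new start.
    alglib::real_1d_array xstart2 = "[-2, 3]";
    alglib::real_1d_array xsol;
    alglib::minnsreport rep;

    alglib::minnsoptimize(state, nsfunc1_jac);     // first solve (state set up earlier)
    alglib::minnsresults(state, xsol, rep);

    alglib::minnsrestartfrom(state, xstart2);      // same settings, new starting point
    alglib::minnsoptimize(state, nsfunc1_jac);
    alglib::minnsresults(state, xsol, rep);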
    + +
    +
    /************************************************************************* +MinNS results + +INPUT PARAMETERS: + State - algorithm state + +OUTPUT PARAMETERS: + X - array[0..N-1], solution + Rep - optimization report. You should check Rep.TerminationType + in order to distinguish successful termination from + unsuccessful one: + * -8 internal integrity control detected infinite or + NAN values in function/gradient. Abnormal + termination signalled. + * -3 box constraints are inconsistent + * -1 inconsistent parameters were passed: + * penalty parameter for minnssetalgoags() is zero, + but we have nonlinear constraints set by minnssetnlc() + * 2 sampling radius decreased below epsx + * 7 stopping conditions are too stringent, + further improvement is impossible, + X contains best point found so far. + * 8 User requested termination via minnsrequesttermination() + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnsresults( + minnsstate state, + real_1d_array& x, + minnsreport& rep, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  [4]  

    + +
    +
    /************************************************************************* + +Buffered implementation of minnsresults() which uses pre-allocated buffer +to store X[]. If buffer size is too small, it resizes buffer. It is +intended to be used in the inner cycles of performance critical algorithms +where array reallocation penalty is too large to be ignored. + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnsresultsbuf( + minnsstate state, + real_1d_array& x, + minnsreport& rep, + const xparams _params = alglib::xdefault); + +
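A sketch of the intended pattern: one output buffer reused across many solves in a loop. The loop bound and the array of starting points are hypothetical:

    // Illustrative fragment: avoid reallocating X[] on every solve.
    alglib::real_1d_array xbuf;                    // grown on first call, reused afterwards
    alglib::minnsreport rep;
    for(int k=0; k<nstarts; k++)                   // `nstarts`, `starts[k]`: hypothetical
    {
        alglib::minnsrestartfrom(state, starts[k]);
        alglib::minnsoptimize(state, nsfunc1_jac);
        alglib::minnsresultsbuf(state, xbuf, rep); // no reallocation after the first pass
    }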
    + +
    +
/************************************************************************* +This function tells MinNS unit to use AGS (adaptive gradient sampling) +algorithm for nonsmooth constrained optimization. This algorithm is a +slight modification of one described in "An Adaptive Gradient Sampling +Algorithm for Nonsmooth Optimization" by Frank E. Curtis and Xiaocun Que. + +This optimizer has the following benefits and drawbacks: ++ robustness; it can be used with nonsmooth and nonconvex functions. ++ relatively easy tuning; most of the metaparameters are easy to select. +- it has the convergence of steepest descent, slower than CG/LBFGS. +- each iteration involves evaluation of ~2N gradient values and solution + of a 2Nx2N quadratic programming problem, which limits applicability of + the algorithm to small-scale problems (up to 50-100 variables). + +IMPORTANT: this algorithm has convergence guarantees, i.e. it will + steadily move towards some stationary point of the function. + + However, "stationary point" does not always mean "solution". + Nonsmooth problems often have "flat spots", i.e. areas where the + function does not change at all. Such "flat spots" are stationary + points by definition, and the algorithm may get caught there. + + Nonsmooth CONVEX tasks are not prone to this problem. Say, if + your function has form f()=MAX(f0,f1,...), and f_i are convex, + then f() is convex too and you have guaranteed convergence to + solution. + +INPUT PARAMETERS: + State - structure which stores algorithm state + Radius - initial sampling radius, >=0. + + Internally multiplied by the vector of per-variable scales + specified by minnssetscale(). + + You should select a relatively large sampling radius, roughly + proportional to the scaled length of the first steps of the + algorithm. Something close to 0.1 in magnitude should be + good for most problems. + + AGS solver can automatically decrease the radius, so a too + large radius is not a problem (assuming that you won't choose + such a large radius that the algorithm samples the function at + points too far away, where gradient values are irrelevant). + + A too small radius won't cause the algorithm to fail, but it + may slow the algorithm down (it may have to perform too short + steps). + Penalty - penalty coefficient for nonlinear constraints: + * for a problem with nonlinear constraints it should be some + problem-specific positive value, large enough that the + penalty term changes the shape of the function. + Starting from some problem-specific value the penalty + coefficient becomes large enough to exactly enforce + nonlinear constraints; larger values do not improve + precision. + Increasing it too much may slow down convergence, so you + should choose it carefully. + * can be zero for problems WITHOUT nonlinear constraints + (i.e. for unconstrained ones or ones with just box or + linear constraints) + * if you specify a zero value for a problem with at least one + nonlinear constraint, the algorithm will terminate with + error code -1. + +ALGORITHM OUTLINE + +The very basic outline of the unconstrained AGS algorithm is given below: + +0. If the sampling radius is below EpsX or we performed more than MaxIts + iterations - STOP. +1. sample O(N) gradient values at random locations around current point; + informally speaking, this sample is an implicit piecewise linear model + of the function, although algorithm formulation does not mention that + explicitly +2. solve quadratic programming problem in order to find descent direction +3. if QP solver tells us that we are near solution, decrease sampling + radius and move to (0) +4.
perform backtracking line search +5. after moving to new point, goto (0) + +As for the constraints: +* box constraints are handled exactly by modification of the function + being minimized +* linear/nonlinear constraints are handled by adding L1 penalty. Because + our solver can handle nonsmoothness, we can use L1 penalty function, + which is an exact one (i.e. exact solution is returned under such + penalty). +* penalty coefficient for linear constraints is chosen automatically; + however, penalty coefficient for nonlinear constraints must be specified + by user. + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnssetalgoags( + minnsstate state, + double radius, + double penalty, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  [4]  
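The complete examples below pass penalty=0 because they have no nonlinear constraints; as an illustration only, a nonlinearly constrained setup could start from values like these (they are not tuned recommendations):

    // Illustrative fragment: AGS parameters for a problem WITH nonlinear
    // constraints registered through minnssetnlc().
    double radius  = 0.1;    // roughly the scaled length of the first steps
    double penalty = 50.0;   // problem-specific; must be >0 when nonlinear constraints exist
    alglib::minnssetalgoags(state, radius, penalty);
    alglib::minnssetnlc(state, 0, 1);   // one nonlinear inequality constraint H0(x)<=0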

    + +
    +
    /************************************************************************* +This function sets boundary constraints. + +Boundary constraints are inactive by default (after initial creation). +They are preserved after algorithm restart with minnsrestartfrom(). + +INPUT PARAMETERS: + State - structure stores algorithm state + BndL - lower bounds, array[N]. + If some (all) variables are unbounded, you may specify + very small number or -INF. + BndU - upper bounds, array[N]. + If some (all) variables are unbounded, you may specify + very large number or +INF. + +NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th +variable will be "frozen" at X[i]=BndL[i]=BndU[i]. + +NOTE 2: AGS solver has following useful properties: +* bound constraints are always satisfied exactly +* function is evaluated only INSIDE area specified by bound constraints, + even when numerical differentiation is used (algorithm adjusts nodes + according to boundary constraints) + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnssetbc( + minnsstate state, + real_1d_array bndl, + real_1d_array bndu, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This function sets stopping conditions for iterations of optimizer. + +INPUT PARAMETERS: + State - structure which stores algorithm state + EpsX - >=0 + The AGS solver finishes its work if on k+1-th iteration + sampling radius decreases below EpsX. + MaxIts - maximum number of iterations. If MaxIts=0, the number of + iterations is unlimited. + +Passing EpsX=0 and MaxIts=0 (simultaneously) will lead to automatic +stopping criterion selection. We do not recommend you to rely on default +choice in production code. + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnssetcond( + minnsstate state, + double epsx, + ae_int_t maxits, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  [4]  

    + +
    +
    /************************************************************************* +This function sets linear constraints. + +Linear constraints are inactive by default (after initial creation). +They are preserved after algorithm restart with minnsrestartfrom(). + +INPUT PARAMETERS: + State - structure previously allocated with minnscreate() call. + C - linear constraints, array[K,N+1]. + Each row of C represents one constraint, either equality + or inequality (see below): + * first N elements correspond to coefficients, + * last element corresponds to the right part. + All elements of C (including right part) must be finite. + CT - type of constraints, array[K]: + * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] + * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] + * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] + K - number of equality/inequality constraints, K>=0: + * if given, only leading K elements of C/CT are used + * if not given, automatically determined from sizes of C/CT + +NOTE: linear (non-bound) constraints are satisfied only approximately: + +* there always exists some minor violation (about current sampling radius + in magnitude during optimization, about EpsX in the solution) due to use + of penalty method to handle constraints. +* numerical differentiation, if used, may lead to function evaluations + outside of the feasible area, because algorithm does NOT change + numerical differentiation formula according to linear constraints. + +If you want constraints to be satisfied exactly, try to reformulate your +problem in such manner that all constraints will become boundary ones +(this kind of constraints is always satisfied exactly, both in the final +solution and in all intermediate points). + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnssetlc( + minnsstate state, + real_2d_array c, + integer_1d_array ct, + const xparams _params = alglib::xdefault); +void alglib::minnssetlc( + minnsstate state, + real_2d_array c, + integer_1d_array ct, + ae_int_t k, + const xparams _params = alglib::xdefault); + +
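A small illustration of the C/CT encoding described above, for two constraints on (x0,x1): the inequality x0+x1<=2 and the equality x0-x1=0 (both invented for this sketch):

    // Illustrative fragment: each row of C holds the N coefficients followed
    // by the right-hand side; CT gives the sense of each constraint.
    alglib::real_2d_array c  = "[[1,1,2],[1,-1,0]]";  // x0+x1<=2  and  x0-x1=0
    alglib::integer_1d_array ct = "[-1,0]";           // -1 => "<=",  0 => "="
    alglib::minnssetlc(state, c, ct);                 // K inferred from sizes of c/ct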
    + +
    +
/************************************************************************* +This function sets nonlinear constraints. + +In fact, this function sets NUMBER of nonlinear constraints. The constraints +themselves (constraint functions) are passed to minnsoptimize() method. This +method requires user-defined vector function F[] and its Jacobian J[], +where: +* first component of F[] and first row of Jacobian J[] correspond to + function being minimized +* next NLEC components of F[] (and rows of J) correspond to nonlinear + equality constraints G_i(x)=0 +* next NLIC components of F[] (and rows of J) correspond to nonlinear + inequality constraints H_i(x)<=0 + +NOTE: you may combine nonlinear constraints with linear/boundary ones. If + your problem has mixed constraints, you may explicitly specify some + of them as linear ones. It may help optimizer to handle them more + efficiently. + +INPUT PARAMETERS: + State - structure previously allocated with minnscreate() call. + NLEC - number of Non-Linear Equality Constraints (NLEC), >=0 + NLIC - number of Non-Linear Inequality Constraints (NLIC), >=0 + +NOTE 1: nonlinear constraints are satisfied only approximately! It is + possible that algorithm will evaluate function outside of + the feasible area! + +NOTE 2: algorithm scales variables according to scale specified by + minnssetscale() function, so it can handle problems with badly + scaled variables (as long as we KNOW their scales). + + However, there is no way to automatically scale nonlinear + constraints Gi(x) and Hi(x). Inappropriate scaling of Gi/Hi may + ruin convergence. Solving a problem with constraint "1000*G0(x)=0" + is NOT the same as solving it with constraint "0.001*G0(x)=0". + + It means that YOU are the one who is responsible for correct + scaling of nonlinear constraints Gi(x) and Hi(x). We recommend you + to scale nonlinear constraints in such way that I-th component of + dG/dX (or dH/dx) has approximately unit magnitude (for problems + with unit scale) or has magnitude approximately equal to 1/S[i] + (where S is a scale set by minnssetscale() function). + +NOTE 3: nonlinear constraints are always hard to handle, no matter what + algorithm you try to use. Even basic box/linear constraints modify + function curvature by adding valleys and ridges. However, + nonlinear constraints add valleys which are very hard to follow + due to their "curved" nature. + + It means that optimization with a single nonlinear constraint may be + significantly slower than optimization with multiple linear ones. + It is a normal situation, and we recommend you to carefully choose + Rho parameter of minnssetalgoags(), because a too large value may + slow down convergence. + + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnssetnlc( + minnsstate state, + ae_int_t nlec, + ae_int_t nlic, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  
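A sketch of how the counts set here must match the callback's vector function: with minnssetnlc(state, 1, 1) the callback returns the target first, then the equality constraint, then the inequality constraint. The functions below are invented for illustration:

    // Illustrative fragment: callback ordering for NLEC=1, NLIC=1.
    void myfunc_jac(const alglib::real_1d_array &x, alglib::real_1d_array &fi,
                    alglib::real_2d_array &jac, void *ptr)
    {
        fi[0] = x[0]+x[1];                   // f0: target being minimized
        fi[1] = x[0]-1.0;                    // G0: equality constraint  G0(x)=0
        fi[2] = x[0]*x[0]+x[1]*x[1]-4.0;     // H0: inequality constraint H0(x)<=0
        jac[0][0] = 1.0;      jac[0][1] = 1.0;
        jac[1][0] = 1.0;      jac[1][1] = 0.0;
        jac[2][0] = 2*x[0];   jac[2][1] = 2*x[1];
    }
    // ... and in the driver code: alglib::minnssetnlc(state, 1, 1);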

    + +
    +
    /************************************************************************* +This function sets scaling coefficients for NLC optimizer. + +ALGLIB optimizers use scaling matrices to test stopping conditions (step +size and gradient are scaled before comparison with tolerances). Scale of +the I-th variable is a translation invariant measure of: +a) "how large" the variable is +b) how large the step should be to make significant changes in the function + +Scaling is also used by finite difference variant of the optimizer - step +along I-th axis is equal to DiffStep*S[I]. + +INPUT PARAMETERS: + State - structure stores algorithm state + S - array[N], non-zero scaling coefficients + S[i] may be negative, sign doesn't matter. + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnssetscale( + minnsstate state, + real_1d_array s, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  [4]  

    + +
    +
    /************************************************************************* +This function turns on/off reporting. + +INPUT PARAMETERS: + State - structure which stores algorithm state + NeedXRep- whether iteration reports are needed or not + +If NeedXRep is True, algorithm will call rep() callback function if it is +provided to minnsoptimize(). + + -- ALGLIB -- + Copyright 28.11.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minnssetxrep( + minnsstate state, + bool needxrep, + const xparams _params = alglib::xdefault); + +
    + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  nsfunc1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +{
    +    //
    +    // this callback calculates
    +    //
+    //     f0(x0,x1) = 2*|x0|+|x1|
    +    //
    +    // and Jacobian matrix J = [df0/dx0 df0/dx1]
    +    //
    +    fi[0] = 2*fabs(double(x[0]))+fabs(double(x[1]));
    +    jac[0][0] = 2*alglib::sign(x[0]);
    +    jac[0][1] = alglib::sign(x[1]);
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of
    +    //
    +    //     f(x0,x1) = 2*|x0|+|x1|
    +    //
    +    // subject to box constraints
    +    //
    +    //        1 <= x0 < +INF
    +    //     -INF <= x1 < +INF
    +    //
    +    // using nonsmooth nonlinear optimizer.
    +    //
    +    real_1d_array x0 = "[1,1]";
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[1,-inf]";
    +    real_1d_array bndu = "[+inf,+inf]";
    +    double epsx = 0.00001;
    +    double radius = 0.1;
    +    double rho = 0.0;
    +    ae_int_t maxits = 0;
    +    minnsstate state;
    +    minnsreport rep;
    +    real_1d_array x1;
    +
    +    //
    +    // Create optimizer object, choose AGS algorithm and tune its settings:
    +    // * radius=0.1     good initial value; will be automatically decreased later.
    +    // * rho=0.0        penalty coefficient for nonlinear constraints; can be zero
    +    //                  because we do not have such constraints
+    // * epsx=0.00001   stopping conditions
    +    // * s=[1,1]        all variables have unit scale
    +    //
    +    minnscreate(2, x0, state);
    +    minnssetalgoags(state, radius, rho);
    +    minnssetcond(state, epsx, maxits);
    +    minnssetscale(state, s);
    +
    +    //
    +    // Set box constraints.
    +    //
    +    // General linear constraints are set in similar way (see comments on
    +    // minnssetlc() function for more information).
    +    //
    +    // You may combine box, linear and nonlinear constraints in one optimization
    +    // problem.
    +    //
    +    minnssetbc(state, bndl, bndu);
    +
    +    //
    +    // Optimize and test results.
    +    //
    +    // Optimizer object accepts vector function and its Jacobian, with first
    +    // component (Jacobian row) being target function, and next components
    +    // (Jacobian rows) being nonlinear equality and inequality constraints
    +    // (box/linear ones are passed separately by means of minnssetbc() and
    +    // minnssetlc() calls).
    +    //
    +    // If you do not have nonlinear constraints (exactly our situation), then
    +    // you will have one-component function vector and 1xN Jacobian matrix.
    +    //
    +    // So, our vector function has form
    +    //
    +    //     {f0} = { 2*|x0|+|x1| }
    +    //
    +    // with Jacobian
    +    //
    +    //         [                       ]
    +    //     J = [ 2*sign(x0)   sign(x1) ]
    +    //         [                       ]
    +    //
    +    // NOTE: nonsmooth optimizer requires considerably more function
    +    //       evaluations than smooth solver - about 2N times more. Using
    +    //       numerical differentiation introduces additional (multiplicative)
+    //       2N slowdown.
    +    //
    +    //       It means that if smooth optimizer WITH user-supplied gradient
    +    //       needs 100 function evaluations to solve 50-dimensional problem,
+    //       then AGS solver with user-supplied gradient will need about 10,000
+    //       function evaluations, and with numerical gradient about 1,000,000
    +    //       function evaluations will be performed.
    +    //
    +    // NOTE: AGS solver used by us can handle nonsmooth and nonconvex
    +    //       optimization problems. It has convergence guarantees, i.e. it will
    +    //       converge to stationary point of the function after running for some
    +    //       time.
    +    //
    +    //       However, it is important to remember that "stationary point" is not
    +    //       equal to "solution". If your problem is convex, everything is OK.
    +    //       But nonconvex optimization problems may have "flat spots" - large
    +    //       areas where gradient is exactly zero, but function value is far away
    +    //       from optimal. Such areas are stationary points too, and optimizer
    +    //       may be trapped here.
    +    //
    +    //       "Flat spots" are nonsmooth equivalent of the saddle points, but with
    +    //       orders of magnitude worse properties - they may be quite large and
    +    //       hard to avoid. All nonsmooth optimizers are prone to this kind of the
    +    //       problem, because it is impossible to automatically distinguish "flat
    +    //       spot" from true solution.
    +    //
    +    //       This note is here to warn you that you should be very careful when
    +    //       you solve nonsmooth optimization problems. Visual inspection of
    +    //       results is essential.
    +    //
    +    //
    +    alglib::minnsoptimize(state, nsfunc1_jac);
    +    minnsresults(state, x1, rep);
    +    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [1.0000,0.0000]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  nsfunc1_fvec(const real_1d_array &x, real_1d_array &fi, void *ptr)
    +{
    +    //
    +    // this callback calculates
    +    //
    +    //     f0(x0,x1) = 2*|x0|+|x1|
    +    //
    +    fi[0] = 2*fabs(double(x[0]))+fabs(double(x[1]));
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of
    +    //
    +    //     f(x0,x1) = 2*|x0|+|x1|
    +    //
    +    // using nonsmooth nonlinear optimizer with numerical
    +    // differentiation provided by ALGLIB.
    +    //
    +    // NOTE: the nonsmooth optimizer requires considerably more function
    +    //       evaluations than a smooth solver - about 2N times more. Using
    +    //       numerical differentiation introduces an additional (multiplicative)
    +    //       2N slowdown.
    +    //
    +    //       It means that if a smooth optimizer WITH a user-supplied gradient
    +    //       needs 100 function evaluations to solve a 50-dimensional problem,
    +    //       then the AGS solver with a user-supplied gradient will need about
    +    //       10,000 function evaluations, and with a numerical gradient about
    +    //       1,000,000 function evaluations will be performed.
    +    //
    +    real_1d_array x0 = "[1,1]";
    +    real_1d_array s = "[1,1]";
    +    double epsx = 0.00001;
    +    double diffstep = 0.000001;
    +    double radius = 0.1;
    +    double rho = 0.0;
    +    ae_int_t maxits = 0;
    +    minnsstate state;
    +    minnsreport rep;
    +    real_1d_array x1;
    +
    +    //
    +    // Create optimizer object, choose AGS algorithm and tune its settings:
    +    // * radius=0.1     good initial value; will be automatically decreased later.
    +    // * rho=0.0        penalty coefficient for nonlinear constraints; can be zero
    +    //                  because we do not have such constraints
    +    // * epsx=0.00001   stopping conditions
    +    // * s=[1,1]        all variables have unit scale
    +    //
    +    minnscreatef(2, x0, diffstep, state);
    +    minnssetalgoags(state, radius, rho);
    +    minnssetcond(state, epsx, maxits);
    +    minnssetscale(state, s);
    +
    +    //
    +    // Optimize and test results.
    +    //
    +    // Optimizer object accepts vector function, with first component
    +    // being target function, and next components being nonlinear equality
    +    // and inequality constraints (box/linear ones are passed separately
    +    // by means of minnssetbc() and minnssetlc() calls).
    +    //
    +    // If you do not have nonlinear constraints (exactly our situation), then
    +    // you will have one-component function vector.
    +    //
    +    // So, our vector function has form
    +    //
    +    //     {f0} = { 2*|x0|+|x1| }
    +    //
    +    alglib::minnsoptimize(state, nsfunc1_fvec);
    +    minnsresults(state, x1, rep);
    +    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [0.0000,0.0000]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  nsfunc2_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +{
    +    //
    +    // this callback calculates function vector
    +    //
    +    //     f0(x0,x1) = 2*|x0|+|x1|
    +    //     f1(x0,x1) = x0-1
    +    //     f2(x0,x1) = -x1-1
    +    //
    +    // and Jacobian matrix J
    +    //
    +    //         [ df0/dx0   df0/dx1 ]
    +    //     J = [ df1/dx0   df1/dx1 ]
    +    //         [ df2/dx0   df2/dx1 ]
    +    //
    +    fi[0] = 2*fabs(double(x[0]))+fabs(double(x[1]));
    +    jac[0][0] = 2*alglib::sign(x[0]);
    +    jac[0][1] = alglib::sign(x[1]);
    +    fi[1] = x[0]-1;
    +    jac[1][0] = 1;
    +    jac[1][1] = 0;
    +    fi[2] = -x[1]-1;
    +    jac[2][0] = 0;
    +    jac[2][1] = -1;
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of
    +    //
    +    //     f(x0,x1) = 2*|x0|+|x1|
    +    //
    +    // subject to combination of equality and inequality constraints
    +    //
    +    //      x0  =  1
    +    //      x1 >= -1
    +    //
    +    // using nonsmooth nonlinear optimizer. Although these constraints
    +    // are linear, we treat them as general nonlinear ones in order to
    +    // demonstrate nonlinearly constrained optimization setup.
    +    //
    +    real_1d_array x0 = "[1,1]";
    +    real_1d_array s = "[1,1]";
    +    double epsx = 0.00001;
    +    double radius = 0.1;
    +    double rho = 50.0;
    +    ae_int_t maxits = 0;
    +    minnsstate state;
    +    minnsreport rep;
    +    real_1d_array x1;
    +
    +    //
    +    // Create optimizer object, choose AGS algorithm and tune its settings:
    +    // * radius=0.1     good initial value; will be automatically decreased later.
    +    // * rho=50.0       penalty coefficient for nonlinear constraints. It is your
    +    //                  responsibility to choose good one - large enough that it
    +    //                  enforces constraints, but small enough in order to avoid
    +    //                  extreme slowdown due to ill-conditioning.
    +    // * epsx=0.00001   stopping conditions
    +    // * s=[1,1]        all variables have unit scale
    +    //
    +    minnscreate(2, x0, state);
    +    minnssetalgoags(state, radius, rho);
    +    minnssetcond(state, epsx, maxits);
    +    minnssetscale(state, s);
    +
    +    //
    +    // Set general nonlinear constraints.
    +    //
    +    // This part is trickier than working with box/linear constraints - you
    +    // cannot "pack" a general nonlinear function into a double precision array.
    +    // That's why minnssetnlc() does not accept the constraints themselves - only
    +    // constraint COUNTS are passed: the first parameter is the number of equality
    +    // constraints, the second one is the number of inequality constraints.
    +    //
    +    // As for the constraining functions - they are passed as part of the
    +    // problem Jacobian (see below).
    +    //
    +    // NOTE: the MinNS optimizer supports an arbitrary combination of boundary,
    +    //       general linear and general nonlinear constraints. This example does
    +    //       not show how to work with general linear constraints, but you can
    +    //       easily find that in the documentation on the minnssetlc() function.
    +    //
    +    minnssetnlc(state, 1, 1);
    +
    +    //
    +    // Optimize and test results.
    +    //
    +    // Optimizer object accepts vector function and its Jacobian, with first
    +    // component (Jacobian row) being target function, and next components
    +    // (Jacobian rows) being nonlinear equality and inequality constraints
    +    // (box/linear ones are passed separately by means of minnssetbc() and
    +    // minnssetlc() calls).
    +    //
    +    // Nonlinear equality constraints have form Gi(x)=0, inequality ones
    +    // have form Hi(x)<=0, so we may have to "normalize" constraints prior
    +    // to passing them to optimizer (right side is zero, constraints are
    +    // sorted, multiplied by -1 when needed).
    +    //
    +    // So, our vector function has form
    +    //
    +    //     {f0,f1,f2} = { 2*|x0|+|x1|,  x0-1, -x1-1 }
    +    //
    +    // with Jacobian
    +    //
    +    //         [ 2*sign(x0)   sign(x1) ]
    +    //     J = [     1           0     ]
    +    //         [     0          -1     ]
    +    //
    +    // which means that we have optimization problem
    +    //
    +    //     min{f0} subject to f1=0, f2<=0
    +    //
    +    // which is essentially same as
    +    //
    +    //     min { 2*|x0|+|x1| } subject to x0=1, x1>=-1
    +    //
    +    // NOTE: the AGS solver used here can handle nonsmooth and nonconvex
    +    //       optimization problems. It has convergence guarantees, i.e. it will
    +    //       converge to a stationary point of the function after running for
    +    //       some time.
    +    //
    +    //       However, it is important to remember that "stationary point" is not
    +    //       the same as "solution". If your problem is convex, everything is OK.
    +    //       But nonconvex optimization problems may have "flat spots" - large
    +    //       areas where the gradient is exactly zero, but the function value is
    +    //       far from optimal. Such areas are stationary points too, and the
    +    //       optimizer may be trapped there.
    +    //
    +    //       "Flat spots" are the nonsmooth equivalent of saddle points, but with
    +    //       orders of magnitude worse properties - they may be quite large and
    +    //       hard to avoid. All nonsmooth optimizers are prone to this kind of
    +    //       problem, because it is impossible to automatically distinguish a
    +    //       "flat spot" from a true solution.
    +    //
    +    //       This note is here to warn you that you should be very careful when
    +    //       you solve nonsmooth optimization problems. Visual inspection of
    +    //       results is essential.
    +    //
    +    alglib::minnsoptimize(state, nsfunc2_jac);
    +    minnsresults(state, x1, rep);
    +    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [1.0000,0.0000]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +void  nsfunc1_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
    +{
    +    //
    +    // this callback calculates
    +    //
    +    //     f0(x0,x1) = 2*|x0|+|x1|
    +    //
    +    // and Jacobian matrix J = [df0/dx0 df0/dx1]
    +    //
    +    fi[0] = 2*fabs(double(x[0]))+fabs(double(x[1]));
    +    jac[0][0] = 2*alglib::sign(x[0]);
    +    jac[0][1] = alglib::sign(x[1]);
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of
    +    //
    +    //     f(x0,x1) = 2*|x0|+|x1|
    +    //
    +    // using nonsmooth nonlinear optimizer.
    +    //
    +    real_1d_array x0 = "[1,1]";
    +    real_1d_array s = "[1,1]";
    +    double epsx = 0.00001;
    +    double radius = 0.1;
    +    double rho = 0.0;
    +    ae_int_t maxits = 0;
    +    minnsstate state;
    +    minnsreport rep;
    +    real_1d_array x1;
    +
    +    //
    +    // Create optimizer object, choose AGS algorithm and tune its settings:
    +    // * radius=0.1     good initial value; will be automatically decreased later.
    +    // * rho=0.0        penalty coefficient for nonlinear constraints; can be zero
    +    //                  because we do not have such constraints
    +    // * epsx=0.00001   stopping conditions
    +    // * s=[1,1]        all variables have unit scale
    +    //
    +    minnscreate(2, x0, state);
    +    minnssetalgoags(state, radius, rho);
    +    minnssetcond(state, epsx, maxits);
    +    minnssetscale(state, s);
    +
    +    //
    +    // Optimize and test results.
    +    //
    +    // Optimizer object accepts vector function and its Jacobian, with first
    +    // component (Jacobian row) being target function, and next components
    +    // (Jacobian rows) being nonlinear equality and inequality constraints
    +    // (box/linear ones are passed separately by means of minnssetbc() and
    +    // minnssetlc() calls).
    +    //
    +    // If you do not have nonlinear constraints (exactly our situation), then
    +    // you will have one-component function vector and 1xN Jacobian matrix.
    +    //
    +    // So, our vector function has form
    +    //
    +    //     {f0} = { 2*|x0|+|x1| }
    +    //
    +    // with Jacobian
    +    //
    +    //         [                       ]
    +    //     J = [ 2*sign(x0)   sign(x1) ]
    +    //         [                       ]
    +    //
    +    // NOTE: the nonsmooth optimizer requires considerably more function
    +    //       evaluations than a smooth solver - about 2N times more. Using
    +    //       numerical differentiation introduces an additional (multiplicative)
    +    //       2N slowdown.
    +    //
    +    //       It means that if a smooth optimizer WITH a user-supplied gradient
    +    //       needs 100 function evaluations to solve a 50-dimensional problem,
    +    //       then the AGS solver with a user-supplied gradient will need about
    +    //       10,000 function evaluations, and with a numerical gradient about
    +    //       1,000,000 function evaluations will be performed.
    +    //
    +    // NOTE: the AGS solver used here can handle nonsmooth and nonconvex
    +    //       optimization problems. It has convergence guarantees, i.e. it will
    +    //       converge to a stationary point of the function after running for
    +    //       some time.
    +    //
    +    //       However, it is important to remember that "stationary point" is not
    +    //       the same as "solution". If your problem is convex, everything is OK.
    +    //       But nonconvex optimization problems may have "flat spots" - large
    +    //       areas where the gradient is exactly zero, but the function value is
    +    //       far from optimal. Such areas are stationary points too, and the
    +    //       optimizer may be trapped there.
    +    //
    +    //       "Flat spots" are the nonsmooth equivalent of saddle points, but with
    +    //       orders of magnitude worse properties - they may be quite large and
    +    //       hard to avoid. All nonsmooth optimizers are prone to this kind of
    +    //       problem, because it is impossible to automatically distinguish a
    +    //       "flat spot" from a true solution.
    +    //
    +    //       This note is here to warn you that you should be very careful when
    +    //       you solve nonsmooth optimization problems. Visual inspection of
    +    //       results is essential.
    +    //
    +    alglib::minnsoptimize(state, nsfunc1_jac);
    +    minnsresults(state, x1, rep);
    +    printf("%s\n", x1.tostring(2).c_str()); // EXPECTED: [0.0000,0.0000]
    +    return 0;
    +}
    +
    +
    +
    +
    +
    +
    +
    /************************************************************************* +This structure stores optimization report: +* InnerIterationsCount number of inner iterations +* OuterIterationsCount number of outer iterations +* NCholesky number of Cholesky decomposition +* NMV number of matrix-vector products + (only products calculated as part of iterative + process are counted) +* TerminationType completion code (see below) +* LagBC Lagrange multipliers for box constraints, + array[N], not filled by QP-BLEIC solver +* LagLC Lagrange multipliers for linear constraints, + array[MSparse+MDense], ignored by QP-BLEIC solver + +=== COMPLETION CODES ===================================================== + +Completion codes: +* -9 failure of the automatic scale evaluation: one of the diagonal + elements of the quadratic term is non-positive. Specify variable + scales manually! +* -5 inappropriate solver was used: + * QuickQP solver for problem with general linear constraints (dense/sparse) +* -4 BLEIC-QP or QuickQP solver found unconstrained direction + of negative curvature (function is unbounded from + below even under constraints), no meaningful + minimum can be found. +* -3 inconsistent constraints (or, maybe, feasible point is + too hard to find). If you are sure that constraints are feasible, + try to restart optimizer with better initial approximation. +* -1 solver error +* 1..4 successful completion +* 5 MaxIts steps was taken +* 7 stopping conditions are too stringent, + further improvement is impossible, + X contains best point found so far. + +=== LAGRANGE MULTIPLIERS ================================================= + +Some optimizers report values of Lagrange multipliers on successful +completion (positive completion code): +* DENSE-IPM-QP and SPARSE-IPM-QP return very precise Lagrange multipliers + as determined during solution process. +* DENSE-AUL-QP returns approximate Lagrange multipliers (which are very + close to "true" Lagrange multipliers except for overconstrained or + degenerate problems) + +Two arrays of multipliers are returned: +* LagBC is array[N] which is loaded with multipliers from box constraints; + LagBC[i]>0 means that I-th constraint is at the upper bound, LagBC[I]<0 + means that I-th constraint is at the lower bound, LagBC[I]=0 means that + I-th box constraint is inactive. +* LagLC is array[MSparse+MDense] which is loaded with multipliers from + general linear constraints (former MSparse elements corresponds to + sparse part of the constraint matrix, latter MDense are for the dense + constraints, as was specified by user). + LagLC[i]>0 means that I-th constraint at the upper bound, LagLC[i]<0 + means that I-th constraint is at the lower bound, LagLC[i]=0 means that + I-th linear constraint is inactive. + +On failure (or when optimizer does not support Lagrange multipliers) these +arrays are zero-filled. + +NOTE: methods from IPM family may also return meaningful Lagrange + multipliers on completion with codes -3 (infeasibility detected) and + -4 (unboundedness detected). It is possible that seeming + infeasibility/unboundedness of the problem is due to rounding errors + In this case last values of Lagrange multipliers are returned. +*************************************************************************/ +
    class minqpreport
    +{
    +    ae_int_t inneriterationscount;
    +    ae_int_t outeriterationscount;
    +    ae_int_t nmv;
    +    ae_int_t ncholesky;
    +    ae_int_t terminationtype;
    +    real_1d_array lagbc;
    +    real_1d_array laglc;
    +};
    +
    +
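    +A minimal sketch (not part of the upstream manual) of how the report fields listed
    +above can be inspected after a solve. The problem data is made up for illustration,
    +and minqpsetquadraticterm()/minqpsetlinearterm() from the same subpackage are
    +assumed to be available:
    +
    +#include "stdafx.h"
    +#include <stdio.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +int main(int argc, char **argv)
    +{
    +    // minimize x0^2+x1^2-6*x0-4*x1 subject to x0+x1<=2 (illustrative data)
    +    real_2d_array a = "[[2,0],[0,2]]";
    +    real_1d_array b = "[-6,-4]";
    +    real_1d_array s = "[1,1]";
    +    real_2d_array c = "[[1,1,2]]";        // legacy format: x0+x1 <= 2
    +    integer_1d_array ct = "[-1]";
    +    minqpstate state;
    +    minqpreport rep;
    +    real_1d_array x;
    +
    +    minqpcreate(2, state);
    +    minqpsetquadraticterm(state, a);
    +    minqpsetlinearterm(state, b);
    +    minqpsetscale(state, s);
    +    minqpsetlc(state, c, ct);
    +    minqpsetalgodenseipm(state, 1.0e-9);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +
    +    // positive termination codes denote success, negative ones denote failures
    +    printf("termination type: %d\n", int(rep.terminationtype));
    +    if( rep.terminationtype>0 )
    +    {
    +        printf("x     = %s\n", x.tostring(4).c_str());
    +        printf("lagbc = %s\n", rep.lagbc.tostring(4).c_str()); // zero-filled by solvers
    +        printf("laglc = %s\n", rep.laglc.tostring(4).c_str()); // that do not report them
    +    }
    +    return 0;
    +}
    +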
    + +
    +
    /*************************************************************************
    +This object stores QP optimizer state.
    +You should use functions provided by the MinQP subpackage to work with this
    +object
    +*************************************************************************/
    +
    class minqpstate
    +{
    +};
    +
    +
    + +
    +
    /************************************************************************* +This function appends two-sided linear constraint AL <= A*x <= AU to the +list of currently present sparse constraints. + +Constraint is passed in compressed format: as list of non-zero entries of +coefficient vector A. Such approach is more efficient than dense storage +for highly sparse constraint vectors. + +INPUT PARAMETERS: + State - structure previously allocated with minqpcreate() call. + IdxA - array[NNZ], indexes of non-zero elements of A: + * can be unsorted + * can include duplicate indexes (corresponding entries of + ValA[] will be summed) + ValA - array[NNZ], values of non-zero elements of A + NNZ - number of non-zero coefficients in A + AL, AU - lower and upper bounds; + * AL=AU => equality constraint A*x + * AL<AU => two-sided constraint AL<=A*x<=AU + * AL=-INF => one-sided constraint A*x<=AU + * AU=+INF => one-sided constraint AL<=A*x + * AL=-INF, AU=+INF => constraint is ignored + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpaddlc2(
    +    minqpstate state,
    +    integer_1d_array idxa,
    +    real_1d_array vala,
    +    ae_int_t nnz,
    +    double al,
    +    double au,
    +    const xparams _params = alglib::xdefault);
    +
    +
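    +A minimal sketch (not from the upstream sources) of appending sparse constraint
    +rows in the compressed index/value form described above; the problem itself is
    +left empty and the data is purely illustrative:
    +
    +#include "stdafx.h"
    +#include <stdio.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +int main(int argc, char **argv)
    +{
    +    minqpstate state;
    +    minqpcreate(4, state);
    +
    +    // append  1.0*x0 + 2.0*x3 <= 5  (AL=-INF makes it one-sided)
    +    integer_1d_array idxa = "[0,3]";
    +    real_1d_array    vala = "[1.0,2.0]";
    +    minqpaddlc2(state, idxa, vala, 2, alglib::fp_neginf, 5.0);
    +
    +    // append equality  x1 - x2 = 0  (AL=AU)
    +    integer_1d_array idxb = "[1,2]";
    +    real_1d_array    valb = "[1.0,-1.0]";
    +    minqpaddlc2(state, idxb, valb, 2, 0.0, 0.0);
    +
    +    printf("two sparse rows appended\n");
    +    return 0;
    +}
    +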
    + +
    +
    /************************************************************************* +This function appends two-sided linear constraint AL <= A*x <= AU to the +list of currently present dense constraints. + +INPUT PARAMETERS: + State - structure previously allocated with minqpcreate() call. + A - linear constraint coefficient, array[N], right side is NOT + included. + AL, AU - lower and upper bounds; + * AL=AU => equality constraint Ai*x + * AL<AU => two-sided constraint AL<=A*x<=AU + * AL=-INF => one-sided constraint Ai*x<=AU + * AU=+INF => one-sided constraint AL<=Ai*x + * AL=-INF, AU=+INF => constraint is ignored + + -- ALGLIB -- + Copyright 19.07.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpaddlc2dense(
    +    minqpstate state,
    +    real_1d_array a,
    +    double al,
    +    double au,
    +    const xparams _params = alglib::xdefault);
    +
    +
    + +
    +
    /************************************************************************* + CONSTRAINED QUADRATIC PROGRAMMING + +The subroutine creates QP optimizer. After initial creation, it contains +default optimization problem with zero quadratic and linear terms and no +constraints. + +In order to actually solve something you should: +* set cost vector with minqpsetlinearterm() +* set variable bounds with minqpsetbc() or minqpsetbcall() +* specify constraint matrix with one of the following functions: + * modern API: + * minqpsetlc2() for sparse two-sided constraints AL <= A*x <= AU + * minqpsetlc2dense() for dense two-sided constraints AL <= A*x <= AU + * minqpsetlc2mixed() for mixed two-sided constraints AL <= A*x <= AU + * minqpaddlc2dense() to add one dense row to dense constraint submatrix + * minqpaddlc2() to add one sparse row to sparse constraint submatrix + * legacy API: + * minqpsetlc() for dense one-sided equality/inequality constraints + * minqpsetlcsparse() for sparse one-sided equality/inequality constraints + * minqpsetlcmixed() for mixed dense/sparse one-sided equality/inequality constraints +* choose appropriate QP solver and set it and its stopping criteria by + means of minqpsetalgo??????() function +* call minqpoptimize() to run the solver and minqpresults() to get the + solution vector and additional information. + +Following solvers are recommended for convex and semidefinite problems: +* QuickQP for dense problems with box-only constraints (or no constraints + at all) +* DENSE-IPM-QP for convex or semidefinite problems with medium (up + to several thousands) variable count, dense/sparse quadratic term and + any number (up to many thousands) of dense/sparse general linear + constraints +* SPARSE-IPM-QP for convex or semidefinite problems with large (many + thousands) variable count, sparse quadratic term AND linear constraints. + +If your problem happens to be nonconvex, but either (a) is effectively +convexified under constraints, or (b) has unique solution even with +nonconvex target, then you can use: +* QuickQP for dense nonconvex problems with box-only constraints +* DENSE-AUL-QP for dense nonconvex problems which are effectively + convexified under constraints with up to several thousands of variables + and any (small or large) number of general linear constraints +* QP-BLEIC for dense/sparse problems with small (up to several hundreds) + number of general linear constraints and arbitrarily large variable + count. + +INPUT PARAMETERS: + N - problem size + +OUTPUT PARAMETERS: + State - optimizer with zero quadratic/linear terms + and no constraints + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpcreate(
    +    ae_int_t n,
    +    minqpstate& state,
    +    const xparams _params = alglib::xdefault);
    +
    +
    +

    Examples:   [1]  [2]  [3]  [4]  [5]  
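    +In addition to the examples referenced above, here is a compact end-to-end sketch
    +(not part of the upstream manual) of the workflow described in the comment: cost
    +vector, box constraints, one dense linear constraint, solver selection, optimization
    +and results. It assumes minqpsetquadraticterm()/minqpsetlinearterm() from this
    +subpackage and uses made-up data:
    +
    +#include "stdafx.h"
    +#include <stdio.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +int main(int argc, char **argv)
    +{
    +    // minimize x0^2+x1^2-6*x0-4*x1
    +    // subject to 0<=x0<=2.5, 0<=x1<=2.5 and 0 <= x0+x1 <= 3
    +    real_2d_array a = "[[2,0],[0,2]]";     // quadratic term
    +    real_1d_array b = "[-6,-4]";           // linear term
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[0.0,0.0]";
    +    real_1d_array bndu = "[2.5,2.5]";
    +    real_2d_array lc = "[[1,1]]";          // one dense row: x0+x1
    +    real_1d_array al = "[0.0]";
    +    real_1d_array au = "[3.0]";
    +    minqpstate state;
    +    minqpreport rep;
    +    real_1d_array x;
    +
    +    minqpcreate(2, state);
    +    minqpsetquadraticterm(state, a);
    +    minqpsetlinearterm(state, b);
    +    minqpsetscale(state, s);
    +    minqpsetbc(state, bndl, bndu);
    +    minqpsetlc2dense(state, lc, al, au, 1);
    +    minqpsetalgodenseipm(state, 1.0e-9);   // convex problem, dense data => DENSE-IPM
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%d: %s\n", int(rep.terminationtype), x.tostring(4).c_str()); // ~[2.0,1.0]
    +    return 0;
    +}
    +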

    + +
    +
    /************************************************************************* +This function solves quadratic programming problem. + +Prior to calling this function you should choose solver by means of one of +the following functions: + +* minqpsetalgoquickqp() - for QuickQP solver +* minqpsetalgobleic() - for BLEIC-QP solver +* minqpsetalgodenseaul() - for Dense-AUL-QP solver +* minqpsetalgodenseipm() - for Dense-IPM-QP solver + +These functions also allow you to control stopping criteria of the solver. +If you did not set solver, MinQP subpackage will automatically select +solver for your problem and will run it with default stopping criteria. + +However, it is better to set explicitly solver and its stopping criteria. + +INPUT PARAMETERS: + State - algorithm state + +You should use MinQPResults() function to access results after calls +to this function. + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey. + Special thanks to Elvira Illarionova for important suggestions on + the linearly constrained QP algorithm. +*************************************************************************/ +
    void alglib::minqpoptimize(
    +    minqpstate state,
    +    const xparams _params = alglib::xdefault);
    +
    +
    +

    Examples:   [1]  [2]  [3]  [4]  [5]  

    + +
    +
    /************************************************************************* +QP solver results + +INPUT PARAMETERS: + State - algorithm state + +OUTPUT PARAMETERS: + X - array[0..N-1], solution. + This array is allocated and initialized only when + Rep.TerminationType parameter is positive (success). + Rep - optimization report, contains: + * completion code in Rep.TerminationType (positive values + denote some kind of success, negative - failures) + * Lagrange multipliers - for QP solvers which support then + * other statistics + See comments on minqpreport structure for more information + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpresults(
    +    minqpstate state,
    +    real_1d_array& x,
    +    minqpreport& rep,
    +    const xparams _params = alglib::xdefault);
    +
    +
    +

    Examples:   [1]  [2]  [3]  [4]  [5]  

    + +
    +
    /************************************************************************* +QP results + +Buffered implementation of MinQPResults() which uses pre-allocated buffer +to store X[]. If buffer size is too small, it resizes buffer. It is +intended to be used in the inner cycles of performance critical algorithms +where array reallocation penalty is too large to be ignored. + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpresultsbuf(
    +    minqpstate state,
    +    real_1d_array& x,
    +    minqpreport& rep,
    +    const xparams _params = alglib::xdefault);
    +
    +
    + +
    +
    /************************************************************************* +This function tells solver to use BLEIC-based algorithm and sets stopping +criteria for the algorithm. + +This algorithm is intended for large-scale problems, possibly nonconvex, +with small number of general linear constraints. Feasible initial point is +essential for good performance. + +IMPORTANT: when DENSE-IPM (or DENSE-AUL for nonconvex problems) solvers + are applicable, their performance is much better than that of + BLEIC-QP. + We recommend you to use BLEIC only when other solvers can not + be used. + +ALGORITHM FEATURES: + +* supports dense and sparse QP problems +* supports box and general linear equality/inequality constraints +* can solve all types of problems (convex, semidefinite, nonconvex) as + long as they are bounded from below under constraints. + Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1". + Of course, global minimum is found only for positive definite and + semidefinite problems. As for indefinite ones - only local minimum is + found. + +ALGORITHM OUTLINE: + +* BLEIC-QP solver is just a driver function for MinBLEIC solver; it solves + quadratic programming problem as general linearly constrained + optimization problem, which is solved by means of BLEIC solver (part of + ALGLIB, active set method). + +ALGORITHM LIMITATIONS: +* This algorithm is inefficient on problems with hundreds and thousands + of general inequality constraints and infeasible initial point. Initial + feasibility detection stage may take too long on such constraint sets. + Consider using DENSE-IPM or DENSE-AUL instead. +* unlike QuickQP solver, this algorithm does not perform Newton steps and + does not use Level 3 BLAS. Being general-purpose active set method, it + can activate constraints only one-by-one. Thus, its performance is lower + than that of QuickQP. +* its precision is also a bit inferior to that of QuickQP. BLEIC-QP + performs only LBFGS steps (no Newton steps), which are good at detecting + neighborhood of the solution, buy needs many iterations to find solution + with more than 6 digits of precision. + +INPUT PARAMETERS: + State - structure which stores algorithm state + EpsG - >=0 + The subroutine finishes its work if the condition + |v|<EpsG is satisfied, where: + * |.| means Euclidian norm + * v - scaled constrained gradient vector, v[i]=g[i]*s[i] + * g - gradient + * s - scaling coefficients set by MinQPSetScale() + EpsF - >=0 + The subroutine finishes its work if exploratory steepest + descent step on k+1-th iteration satisfies following + condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} + EpsX - >=0 + The subroutine finishes its work if exploratory steepest + descent step on k+1-th iteration satisfies following + condition: + * |.| means Euclidian norm + * v - scaled step vector, v[i]=dx[i]/s[i] + * dx - step vector, dx=X(k+1)-X(k) + * s - scaling coefficients set by MinQPSetScale() + MaxIts - maximum number of iterations. If MaxIts=0, the number of + iterations is unlimited. NOTE: this algorithm uses LBFGS + iterations, which are relatively cheap, but improve + function value only a bit. So you will need many iterations + to converge - from 0.1*N to 10*N, depending on problem's + condition number. + +IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM +BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT! 
+ +Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead +to automatic stopping criterion selection (presently it is small step +length, but it may change in the future versions of ALGLIB). + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetalgobleic(
    +    minqpstate state,
    +    double epsg,
    +    double epsf,
    +    double epsx,
    +    ae_int_t maxits,
    +    const xparams _params = alglib::xdefault);
    +
    +
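    +A minimal sketch (not from the upstream manual) of selecting BLEIC-QP with explicit
    +stopping criteria; as stressed above, minqpsetscale() is called because the criteria
    +are scale-dependent. The data is illustrative and the quadratic/linear-term setters
    +are assumed from this subpackage:
    +
    +#include "stdafx.h"
    +#include <stdio.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +int main(int argc, char **argv)
    +{
    +    real_2d_array a = "[[2,1],[1,3]]";
    +    real_1d_array b = "[-1,-2]";
    +    real_1d_array s = "[1,1]";             // scaling MUST be set for BLEIC criteria
    +    real_1d_array bndl = "[0,0]";
    +    real_1d_array bndu = "[1,1]";
    +    minqpstate state;
    +    minqpreport rep;
    +    real_1d_array x;
    +
    +    minqpcreate(2, state);
    +    minqpsetquadraticterm(state, a);
    +    minqpsetlinearterm(state, b);
    +    minqpsetscale(state, s);
    +    minqpsetbc(state, bndl, bndu);
    +
    +    // EpsG=0, EpsF=0, EpsX=1e-6, MaxIts=0: stop on small scaled step
    +    minqpsetalgobleic(state, 0.0, 0.0, 1.0e-6, 0);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(4).c_str()); // ~[0.2,0.6]
    +    return 0;
    +}
    +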
    + +
    +
    /************************************************************************* +This function tells QP solver to use DENSE-AUL algorithm and sets stopping +criteria for the algorithm. + +This algorithm is intended for non-convex problems with moderate (up +to several thousands) variable count and arbitrary number of constraints +which are either (a) effectively convexified under constraints or (b) have +unique solution even with nonconvex target. + +IMPORTANT: when DENSE-IPM solver is applicable, its performance is usually + much better than that of DENSE-AUL. + We recommend you to use DENSE-AUL only when other solvers can + not be used. + +ALGORITHM FEATURES: + +* supports box and dense/sparse general linear equality/inequality + constraints +* convergence is theoretically proved for positive-definite (convex) QP + problems. Semidefinite and non-convex problems can be solved as long as + they are bounded from below under constraints, although without + theoretical guarantees. + +ALGORITHM OUTLINE: + +* this algorithm is an augmented Lagrangian method with dense + preconditioner (hence its name). +* it performs several outer iterations in order to refine values of the + Lagrange multipliers. Single outer iteration is a solution of some + unconstrained optimization problem: first it performs dense Cholesky + factorization of the Hessian in order to build preconditioner (adaptive + regularization is applied to enforce positive definiteness), and then + it uses L-BFGS optimizer to solve optimization problem. +* typically you need about 5-10 outer iterations to converge to solution + +ALGORITHM LIMITATIONS: + +* because dense Cholesky driver is used, this algorithm has O(N^2) memory + requirements and O(OuterIterations*N^3) minimum running time. From the + practical point of view, it limits its applicability by several + thousands of variables. + From the other side, variables count is the most limiting factor, + and dependence on constraint count is much more lower. Assuming that + constraint matrix is sparse, it may handle tens of thousands of general + linear constraints. + +INPUT PARAMETERS: + State - structure which stores algorithm state + EpsX - >=0, stopping criteria for inner optimizer. + Inner iterations are stopped when step length (with + variable scaling being applied) is less than EpsX. + See minqpsetscale() for more information on variable + scaling. + Rho - penalty coefficient, Rho>0: + * large enough that algorithm converges with desired + precision. + * not TOO large to prevent ill-conditioning + * recommended values are 100, 1000 or 10000 + ItsCnt - number of outer iterations: + * recommended values: 10-15 (although in most cases it + converges within 5 iterations, you may need a few more + to be sure). + * ItsCnt=0 means that small number of outer iterations is + automatically chosen (10 iterations in current version). + * ItsCnt=1 means that AUL algorithm performs just as usual + penalty method. + * ItsCnt>1 means that AUL algorithm performs specified + number of outer iterations + +IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM +BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT! + +NOTE: Passing EpsX=0 will lead to automatic step length selection + (specific step length chosen may change in the future versions of + ALGLIB, so it is better to specify step length explicitly). + + -- ALGLIB -- + Copyright 20.08.2016 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetalgodenseaul(
    +    minqpstate state,
    +    double epsx,
    +    double rho,
    +    ae_int_t itscnt,
    +    const xparams _params = alglib::xdefault);
    +
    +
    + +
    +
    /************************************************************************* +This function tells QP solver to use DENSE-IPM QP algorithm and sets +stopping criteria for the algorithm. + +This algorithm is intended for convex and semidefinite problems with +moderate (up to several thousands) variable count and arbitrary number of +constraints. + +IMPORTANT: this algorithm won't work for nonconvex problems, use DENSE-AUL + or BLEIC-QP instead. If you try to run DENSE-IPM on problem + with indefinite matrix (matrix having at least one negative + eigenvalue) then depending on circumstances it may either (a) + stall at some arbitrary point, or (b) throw exception on + failure of Cholesky decomposition. + +ALGORITHM FEATURES: + +* supports box and dense/sparse general linear equality/inequality + constraints + +ALGORITHM OUTLINE: + +* this algorithm is an implementation of interior point method as + formulated by R.J.Vanderbei, with minor modifications to the algorithm + (damped Newton directions are extensively used) +* like all interior point methods, this algorithm tends to converge in + roughly same number of iterations (between 15 and 30) independently from + the problem dimensionality + +ALGORITHM LIMITATIONS: + +* because dense Cholesky driver is used, for N-dimensional problem with + M dense constaints this algorithm has O(N^2+N*M) memory requirements and + O(N^3+N*M^2) running time. + Having sparse constraints with Z nonzeros per row relaxes storage and + running time down to O(N^2+M*Z) and O(N^3+N*Z^2) + From the practical point of view, it limits its applicability by + several thousands of variables. + From the other side, variables count is the most limiting factor, + and dependence on constraint count is much more lower. Assuming that + constraint matrix is sparse, it may handle tens of thousands of general + linear constraints. + +INPUT PARAMETERS: + State - structure which stores algorithm state + Eps - >=0, stopping criteria. The algorithm stops when primal + and dual infeasiblities as well as complementarity gap are + less than Eps. + +IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM +BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT! + +NOTE: Passing EpsX=0 will lead to automatic selection of small epsilon. + +===== TRACING IPM SOLVER ================================================= + +IPM solver supports advanced tracing capabilities. You can trace algorithm +output by specifying following trace symbols (case-insensitive) by means +of trace_file() call: +* 'IPM' - for basic trace of algorithm steps and decisions. Only + short scalars (function values and deltas) are printed. + N-dimensional quantities like search directions are NOT + printed. +* 'IPM.DETAILED'- for output of points being visited and search directions + This symbol also implicitly defines 'IPM'. You can + control output format by additionally specifying: + * nothing to output in 6-digit exponential format + * 'PREC.E15' to output in 15-digit exponential format + * 'PREC.F6' to output in 6-digit fixed-point format + +By default trace is disabled and adds no overhead to the optimization +process. However, specifying any of the symbols adds some formatting and +output-related overhead. + +You may specify multiple symbols by separating them with commas: +> +> alglib::trace_file("IPM.DETAILED,PREC.F6", "path/to/trace.log") +> + + -- ALGLIB -- + Copyright 01.11.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetalgodenseipm(
    +    minqpstate state,
    +    double eps,
    +    const xparams _params = alglib::xdefault);
    +
    +
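    +A minimal sketch (not from the upstream manual) of selecting DENSE-IPM and enabling
    +the trace facility mentioned above; the log file name is illustrative and the
    +quadratic/linear-term setters are assumed from this subpackage:
    +
    +#include "stdafx.h"
    +#include <stdio.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +int main(int argc, char **argv)
    +{
    +    real_2d_array a = "[[3,1],[1,2]]";
    +    real_1d_array b = "[-1,-1]";
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[0,0]";
    +    real_1d_array bndu = "[1,1]";
    +    minqpstate state;
    +    minqpreport rep;
    +    real_1d_array x;
    +
    +    // optional: dump IPM iterations to a log file (path is illustrative)
    +    alglib::trace_file("IPM", "ipm_trace.log");
    +
    +    minqpcreate(2, state);
    +    minqpsetquadraticterm(state, a);
    +    minqpsetlinearterm(state, b);
    +    minqpsetscale(state, s);
    +    minqpsetbc(state, bndl, bndu);
    +    minqpsetalgodenseipm(state, 1.0e-9);   // stop when infeasibilities/gap < 1e-9
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(4).c_str()); // ~[0.2,0.4]
    +    return 0;
    +}
    +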
    + +
    +
    /************************************************************************* +This function tells solver to use QuickQP algorithm: special extra-fast +algorithm for problems with box-only constrants. It may solve non-convex +problems as long as they are bounded from below under constraints. + +ALGORITHM FEATURES: +* several times faster than DENSE-IPM when running on box-only problem +* utilizes accelerated methods for activation of constraints. +* supports dense and sparse QP problems +* supports ONLY box constraints; general linear constraints are NOT + supported by this solver +* can solve all types of problems (convex, semidefinite, nonconvex) as + long as they are bounded from below under constraints. + Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1". + In convex/semidefinite case global minimum is returned, in nonconvex + case - algorithm returns one of the local minimums. + +ALGORITHM OUTLINE: + +* algorithm performs two kinds of iterations: constrained CG iterations + and constrained Newton iterations +* initially it performs small number of constrained CG iterations, which + can efficiently activate/deactivate multiple constraints +* after CG phase algorithm tries to calculate Cholesky decomposition and + to perform several constrained Newton steps. If Cholesky decomposition + failed (matrix is indefinite even under constraints), we perform more + CG iterations until we converge to such set of constraints that system + matrix becomes positive definite. Constrained Newton steps greatly + increase convergence speed and precision. +* algorithm interleaves CG and Newton iterations which allows to handle + indefinite matrices (CG phase) and quickly converge after final set of + constraints is found (Newton phase). Combination of CG and Newton phases + is called "outer iteration". +* it is possible to turn off Newton phase (beneficial for semidefinite + problems - Cholesky decomposition will fail too often) + +ALGORITHM LIMITATIONS: + +* algorithm does not support general linear constraints; only box ones + are supported +* Cholesky decomposition for sparse problems is performed with Skyline + Cholesky solver, which is intended for low-profile matrices. No profile- + reducing reordering of variables is performed in this version of ALGLIB. +* problems with near-zero negative eigenvalues (or exacty zero ones) may + experience about 2-3x performance penalty. The reason is that Cholesky + decomposition can not be performed until we identify directions of zero + and negative curvature and activate corresponding boundary constraints - + but we need a lot of trial and errors because these directions are hard + to notice in the matrix spectrum. + In this case you may turn off Newton phase of algorithm. + Large negative eigenvalues are not an issue, so highly non-convex + problems can be solved very efficiently. 
+ +INPUT PARAMETERS: + State - structure which stores algorithm state + EpsG - >=0 + The subroutine finishes its work if the condition + |v|<EpsG is satisfied, where: + * |.| means Euclidian norm + * v - scaled constrained gradient vector, v[i]=g[i]*s[i] + * g - gradient + * s - scaling coefficients set by MinQPSetScale() + EpsF - >=0 + The subroutine finishes its work if exploratory steepest + descent step on k+1-th iteration satisfies following + condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} + EpsX - >=0 + The subroutine finishes its work if exploratory steepest + descent step on k+1-th iteration satisfies following + condition: + * |.| means Euclidian norm + * v - scaled step vector, v[i]=dx[i]/s[i] + * dx - step vector, dx=X(k+1)-X(k) + * s - scaling coefficients set by MinQPSetScale() + MaxOuterIts-maximum number of OUTER iterations. One outer iteration + includes some amount of CG iterations (from 5 to ~N) and + one or several (usually small amount) Newton steps. Thus, + one outer iteration has high cost, but can greatly reduce + funcation value. + Use 0 if you do not want to limit number of outer iterations. + UseNewton- use Newton phase or not: + * Newton phase improves performance of positive definite + dense problems (about 2 times improvement can be observed) + * can result in some performance penalty on semidefinite + or slightly negative definite problems - each Newton + phase will bring no improvement (Cholesky failure), but + still will require computational time. + * if you doubt, you can turn off this phase - optimizer + will retain its most of its high speed. + +IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM +BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT! + +Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead +to automatic stopping criterion selection (presently it is small step +length, but it may change in the future versions of ALGLIB). + + -- ALGLIB -- + Copyright 22.05.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetalgoquickqp(
    +    minqpstate state,
    +    double epsg,
    +    double epsf,
    +    double epsx,
    +    ae_int_t maxouterits,
    +    bool usenewton,
    +    const xparams _params = alglib::xdefault);
    +
    +
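    +A minimal sketch (not from the upstream manual) of a box-only problem solved with
    +QuickQP, using automatic stopping criteria and the Newton phase enabled; the data
    +is illustrative and the quadratic/linear-term setters are assumed from this
    +subpackage:
    +
    +#include "stdafx.h"
    +#include <stdio.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +int main(int argc, char **argv)
    +{
    +    // box-only convex QP - the case QuickQP is designed for
    +    real_2d_array a = "[[2,0],[0,2]]";
    +    real_1d_array b = "[-6,-4]";
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[0,0]";
    +    real_1d_array bndu = "[2.5,2.5]";
    +    minqpstate state;
    +    minqpreport rep;
    +    real_1d_array x;
    +
    +    minqpcreate(2, state);
    +    minqpsetquadraticterm(state, a);
    +    minqpsetlinearterm(state, b);
    +    minqpsetscale(state, s);               // stopping criteria are scale-dependent
    +    minqpsetbc(state, bndl, bndu);
    +
    +    // automatic stopping criteria (all zero), Newton phase enabled
    +    minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(4).c_str()); // ~[2.5,2.0]
    +    return 0;
    +}
    +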
    + +
    +
    /************************************************************************* +This function tells QP solver to use SPARSE-IPM QP algorithm and sets +stopping criteria for the algorithm. + +This algorithm is intended for convex and semidefinite problems with +large variable and constraint count and sparse quadratic term and +constraints. It is possible to have some limited set of dense linear +constraints - they will be handled separately by dense BLAS - but the more +dense constraints you have, the more time solver needs. + +IMPORTANT: internally this solver performs large and sparse (N+M)x(N+M) + triangular factorization. So it expects both quadratic term and + constraints to be highly sparse. However, its running time is + influenced by BOTH fill factor and sparsity pattern. + + Generally we expect that no more than few nonzero elements per + row are present. However different sparsity patterns may result + in completely different running times even given same fill + factor. + + In many cases this algorithm outperforms DENSE-IPM by order of + magnitude. However, in some cases you may get better results + with DENSE-IPM even when solving sparse task. + +IMPORTANT: this algorithm won't work for nonconvex problems, use DENSE-AUL + or BLEIC-QP instead. If you try to run DENSE-IPM on problem + with indefinite matrix (matrix having at least one negative + eigenvalue) then depending on circumstances it may either (a) + stall at some arbitrary point, or (b) throw exception on + failure of Cholesky decomposition. + +ALGORITHM FEATURES: + +* supports box and dense/sparse general linear equality/inequality + constraints +* specializes on large-scale sparse problems + +ALGORITHM OUTLINE: + +* this algorithm is an implementation of interior point method as + formulated by R.J.Vanderbei, with minor modifications to the algorithm + (damped Newton directions are extensively used) +* like all interior point methods, this algorithm tends to converge in + roughly same number of iterations (between 15 and 30) independently from + the problem dimensionality + +ALGORITHM LIMITATIONS: + +* this algorithm may handle moderate number of dense constraints, usually + no more than a thousand of dense ones without losing its efficiency. + +INPUT PARAMETERS: + State - structure which stores algorithm state + Eps - >=0, stopping criteria. The algorithm stops when primal + and dual infeasiblities as well as complementarity gap are + less than Eps. + +IT IS VERY IMPORTANT TO CALL minqpsetscale() WHEN YOU USE THIS ALGORITHM +BECAUSE ITS CONVERGENCE PROPERTIES AND STOPPING CRITERIA ARE SCALE-DEPENDENT! + +NOTE: Passing EpsX=0 will lead to automatic selection of small epsilon. + + -- ALGLIB -- + Copyright 01.11.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetalgosparseipm(
    +    minqpstate state,
    +    double eps,
    +    const xparams _params = alglib::xdefault);
    +
    +
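    +A minimal sketch (not from the upstream manual) of a sparse setup for SPARSE-IPM.
    +It assumes the sparse-matrix helpers sparsecreate()/sparseset()/sparseconverttocrs()
    +from the linalg subpackage and a minqpsetquadratictermsparse() setter for the sparse
    +quadratic term; the data is illustrative:
    +
    +#include "stdafx.h"
    +#include <stdio.h>
    +#include "linalg.h"
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +int main(int argc, char **argv)
    +{
    +    // minimize x0^2+x1^2+x2^2-x0-x1-x2 subject to x0+x2=2
    +    ae_int_t n = 3;
    +    sparsematrix a, c;
    +    minqpstate state;
    +    minqpreport rep;
    +    real_1d_array x;
    +
    +    sparsecreate(n, n, a);                 // diagonal quadratic term, stored sparsely
    +    for(ae_int_t i=0; i<n; i++)
    +        sparseset(a, i, i, 2.0);
    +    sparseconverttocrs(a);
    +
    +    sparsecreate(1, n, c);                 // one sparse constraint row: x0+x2
    +    sparseset(c, 0, 0, 1.0);
    +    sparseset(c, 0, 2, 1.0);
    +    sparseconverttocrs(c);
    +
    +    real_1d_array b  = "[-1,-1,-1]";
    +    real_1d_array al = "[2.0]";
    +    real_1d_array au = "[2.0]";            // AL=AU => equality constraint
    +    real_1d_array s  = "[1,1,1]";
    +
    +    minqpcreate(n, state);
    +    minqpsetquadratictermsparse(state, a, true);   // assumed sparse-term setter
    +    minqpsetlinearterm(state, b);
    +    minqpsetscale(state, s);
    +    minqpsetlc2(state, c, al, au, 1);
    +    minqpsetalgosparseipm(state, 1.0e-9);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(4).c_str()); // ~[1.0,0.5,1.0]
    +    return 0;
    +}
    +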
    + +
    +
    /************************************************************************* +This function sets box constraints for QP solver + +Box constraints are inactive by default (after initial creation). After +being set, they are preserved until explicitly overwritten with another +minqpsetbc() or minqpsetbcall() call, or partially overwritten with +minqpsetbci() call. + +Following types of constraints are supported: + + DESCRIPTION CONSTRAINT HOW TO SPECIFY + fixed variable x[i]=Bnd[i] BndL[i]=BndU[i] + lower bound BndL[i]<=x[i] BndU[i]=+INF + upper bound x[i]<=BndU[i] BndL[i]=-INF + range BndL[i]<=x[i]<=BndU[i] ... + free variable - BndL[I]=-INF, BndU[I]+INF + +INPUT PARAMETERS: + State - structure stores algorithm state + BndL - lower bounds, array[N]. + If some (all) variables are unbounded, you may specify + very small number or -INF (latter is recommended because + it will allow solver to use better algorithm). + BndU - upper bounds, array[N]. + If some (all) variables are unbounded, you may specify + very large number or +INF (latter is recommended because + it will allow solver to use better algorithm). + +NOTE: infinite values can be specified by means of Double.PositiveInfinity + and Double.NegativeInfinity (in C#) and alglib::fp_posinf and + alglib::fp_neginf (in C++). + +NOTE: you may replace infinities by very small/very large values, but it + is not recommended because large numbers may introduce large numerical + errors in the algorithm. + +NOTE: if constraints for all variables are same you may use minqpsetbcall() + which allows to specify constraints without using arrays. + +NOTE: BndL>BndU will result in QP problem being recognized as infeasible. + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetbc(
    +    minqpstate state,
    +    real_1d_array bndl,
    +    real_1d_array bndu,
    +    const xparams _params = alglib::xdefault);
    +
    +
    +

    Examples:   [1]  
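    +As a small additional illustration (not from the upstream manual) of the bound
    +conventions in the table above, the sketch below fixes one variable, bounds one
    +from below only, and leaves one free, using the infinities mentioned in the notes:
    +
    +#include "stdafx.h"
    +#include <stdio.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +int main(int argc, char **argv)
    +{
    +    // x0 fixed at 1, x1 only bounded from below, x2 free
    +    real_1d_array bndl, bndu;
    +    bndl.setlength(3);
    +    bndu.setlength(3);
    +    bndl[0] = 1.0;               bndu[0] = 1.0;                // fixed variable
    +    bndl[1] = 0.0;               bndu[1] = alglib::fp_posinf;  // lower bound only
    +    bndl[2] = alglib::fp_neginf; bndu[2] = alglib::fp_posinf;  // free variable
    +
    +    minqpstate state;
    +    minqpcreate(3, state);
    +    minqpsetbc(state, bndl, bndu);
    +    printf("box constraints installed\n");
    +    return 0;
    +}
    +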

    + +
    +
    /************************************************************************* +This function sets box constraints for QP solver (all variables at once, +same constraints for all variables) + +Box constraints are inactive by default (after initial creation). After +being set, they are preserved until explicitly overwritten with another +minqpsetbc() or minqpsetbcall() call, or partially overwritten with +minqpsetbci() call. + +Following types of constraints are supported: + + DESCRIPTION CONSTRAINT HOW TO SPECIFY + fixed variable x[i]=Bnd BndL=BndU + lower bound BndL<=x[i] BndU=+INF + upper bound x[i]<=BndU BndL=-INF + range BndL<=x[i]<=BndU ... + free variable - BndL=-INF, BndU+INF + +INPUT PARAMETERS: + State - structure stores algorithm state + BndL - lower bound, same for all variables + BndU - upper bound, same for all variables + +NOTE: infinite values can be specified by means of Double.PositiveInfinity + and Double.NegativeInfinity (in C#) and alglib::fp_posinf and + alglib::fp_neginf (in C++). + +NOTE: you may replace infinities by very small/very large values, but it + is not recommended because large numbers may introduce large numerical + errors in the algorithm. + +NOTE: BndL>BndU will result in QP problem being recognized as infeasible. + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetbcall(
    +    minqpstate state,
    +    double bndl,
    +    double bndu,
    +    const xparams _params = alglib::xdefault);
    +
    +
    + +
    +
    /************************************************************************* +This function sets box constraints for I-th variable (other variables are +not modified). + +Following types of constraints are supported: + + DESCRIPTION CONSTRAINT HOW TO SPECIFY + fixed variable x[i]=Bnd BndL=BndU + lower bound BndL<=x[i] BndU=+INF + upper bound x[i]<=BndU BndL=-INF + range BndL<=x[i]<=BndU ... + free variable - BndL=-INF, BndU+INF + +INPUT PARAMETERS: + State - structure stores algorithm state + BndL - lower bound + BndU - upper bound + +NOTE: infinite values can be specified by means of Double.PositiveInfinity + and Double.NegativeInfinity (in C#) and alglib::fp_posinf and + alglib::fp_neginf (in C++). + +NOTE: you may replace infinities by very small/very large values, but it + is not recommended because large numbers may introduce large numerical + errors in the algorithm. + +NOTE: BndL>BndU will result in QP problem being recognized as infeasible. + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetbci(
    +    minqpstate state,
    +    ae_int_t i,
    +    double bndl,
    +    double bndu,
    +    const xparams _params = alglib::xdefault);
    +
    +
    + +
    +
    /************************************************************************* +This function sets dense linear constraints for QP optimizer. + +This function overrides results of previous calls to minqpsetlc(), +minqpsetlcsparse() and minqpsetlcmixed(). After call to this function +all non-box constraints are dropped, and you have only those constraints +which were specified in the present call. + +If you want to specify mixed (with dense and sparse terms) linear +constraints, you should call minqpsetlcmixed(). + +INPUT PARAMETERS: + State - structure previously allocated with MinQPCreate call. + C - linear constraints, array[K,N+1]. + Each row of C represents one constraint, either equality + or inequality (see below): + * first N elements correspond to coefficients, + * last element corresponds to the right part. + All elements of C (including right part) must be finite. + CT - type of constraints, array[K]: + * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] + * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] + * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] + K - number of equality/inequality constraints, K>=0: + * if given, only leading K elements of C/CT are used + * if not given, automatically determined from sizes of C/CT + +NOTE 1: linear (non-bound) constraints are satisfied only approximately - + there always exists some violation due to numerical errors and + algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP + solver is less precise). + + -- ALGLIB -- + Copyright 19.06.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetlc(
    +    minqpstate state,
    +    real_2d_array c,
    +    integer_1d_array ct,
    +    const xparams _params = alglib::xdefault);
    +void alglib::minqpsetlc(
    +    minqpstate state,
    +    real_2d_array c,
    +    integer_1d_array ct,
    +    ae_int_t k,
    +    const xparams _params = alglib::xdefault);
    +
    +
    +

    Examples:   [1]  
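    +A small sketch (not from the upstream manual) of the legacy [K,N+1] constraint
    +format: the last column carries the right-hand side and CT selects >=, = or <=.
    +The data is illustrative:
    +
    +#include "stdafx.h"
    +#include <stdio.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +int main(int argc, char **argv)
    +{
    +    // two legacy-style constraints on (x0,x1):
    +    //   row 0:  x0 + x1  = 2   (CT=0)
    +    //   row 1:  x0 - x1 >= 0   (CT=+1)
    +    real_2d_array c = "[[1,1,2],[1,-1,0]]";
    +    integer_1d_array ct = "[0,1]";
    +
    +    minqpstate state;
    +    minqpcreate(2, state);
    +    minqpsetlc(state, c, ct);
    +    printf("2 legacy linear constraints installed\n");
    +    return 0;
    +}
    +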

    + +
    +
    /************************************************************************* +This function sets two-sided linear constraints AL <= A*x <= AU with +sparse constraining matrix A. Recommended for large-scale problems. + +This function overwrites linear (non-box) constraints set by previous +calls (if such calls were made). + +INPUT PARAMETERS: + State - structure previously allocated with minqpcreate() call. + A - sparse matrix with size [K,N] (exactly!). + Each row of A represents one general linear constraint. + A can be stored in any sparse storage format. + AL, AU - lower and upper bounds, array[K]; + * AL[i]=AU[i] => equality constraint Ai*x + * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i] + * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] + * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x + * AL[i]=-INF, AU[i]=+INF => constraint is ignored + K - number of equality/inequality constraints, K>=0. If K=0 + is specified, A, AL, AU are ignored. + + -- ALGLIB -- + Copyright 01.11.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetlc2(
    +    minqpstate state,
    +    sparsematrix a,
    +    real_1d_array al,
    +    real_1d_array au,
    +    ae_int_t k,
    +    const xparams _params = alglib::xdefault);
    +
    +
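    +A small sketch (not from the upstream manual) of building the sparse constraint
    +matrix for minqpsetlc2(); it assumes sparsecreate()/sparseset() from the linalg
    +subpackage, and the bounds follow the AL/AU conventions listed above:
    +
    +#include "stdafx.h"
    +#include <stdio.h>
    +#include "linalg.h"
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +int main(int argc, char **argv)
    +{
    +    // two sparse two-sided rows for a 4-variable problem:
    +    //   0 <= x0 + x3 <= 1
    +    //        x1 - x2  = 0     (AL=AU)
    +    sparsematrix a;
    +    sparsecreate(2, 4, a);
    +    sparseset(a, 0, 0, 1.0);
    +    sparseset(a, 0, 3, 1.0);
    +    sparseset(a, 1, 1, 1.0);
    +    sparseset(a, 1, 2, -1.0);
    +
    +    real_1d_array al = "[0.0,0.0]";
    +    real_1d_array au = "[1.0,0.0]";
    +
    +    minqpstate state;
    +    minqpcreate(4, state);
    +    minqpsetlc2(state, a, al, au, 2);
    +    printf("2 sparse two-sided constraints installed\n");
    +    return 0;
    +}
    +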
    + +
    +
    /************************************************************************* +This function sets two-sided linear constraints AL <= A*x <= AU with dense +constraint matrix A. + +NOTE: knowing that constraint matrix is dense helps some QP solvers + (especially modern IPM method) to utilize efficient dense Level 3 + BLAS for dense parts of the problem. If your problem has both dense + and sparse constraints, you can use minqpsetlc2mixed() function, + which will result in dense algebra being applied to dense terms, and + sparse sparse linear algebra applied to sparse terms. + +INPUT PARAMETERS: + State - structure previously allocated with minqpcreate() call. + A - linear constraints, array[K,N]. Each row of A represents + one constraint. One-sided inequality constraints, two- + sided inequality constraints, equality constraints are + supported (see below) + AL, AU - lower and upper bounds, array[K]; + * AL[i]=AU[i] => equality constraint Ai*x + * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i] + * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] + * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x + * AL[i]=-INF, AU[i]=+INF => constraint is ignored + K - number of equality/inequality constraints, K>=0; if not + given, inferred from sizes of A, AL, AU. + + -- ALGLIB -- + Copyright 01.11.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetlc2dense(
    +    minqpstate state,
    +    real_2d_array a,
    +    real_1d_array al,
    +    real_1d_array au,
    +    const xparams _params = alglib::xdefault);
    +void alglib::minqpsetlc2dense(
    +    minqpstate state,
    +    real_2d_array a,
    +    real_1d_array al,
    +    real_1d_array au,
    +    ae_int_t k,
    +    const xparams _params = alglib::xdefault);
    +
    +
    + +
    +
    /************************************************************************* +This function sets two-sided linear constraints AL <= A*x <= AU with +mixed constraining matrix A including sparse part (first SparseK rows) and +dense part (last DenseK rows). Recommended for large-scale problems. + +This function overwrites linear (non-box) constraints set by previous +calls (if such calls were made). + +This function may be useful if constraint matrix includes large number of +both types of rows - dense and sparse. If you have just a few sparse rows, +you may represent them in dense format without loosing performance. +Similarly, if you have just a few dense rows, you may store them in sparse +format with almost same performance. + +INPUT PARAMETERS: + State - structure previously allocated with minqpcreate() call. + SparseA - sparse matrix with size [K,N] (exactly!). + Each row of A represents one general linear constraint. + A can be stored in any sparse storage format. + SparseK - number of sparse constraints, SparseK>=0 + DenseA - linear constraints, array[K,N], set of dense constraints. + Each row of A represents one general linear constraint. + DenseK - number of dense constraints, DenseK>=0 + AL, AU - lower and upper bounds, array[SparseK+DenseK], with former + SparseK elements corresponding to sparse constraints, and + latter DenseK elements corresponding to dense constraints; + * AL[i]=AU[i] => equality constraint Ai*x + * AL[i]<AU[i] => two-sided constraint AL[i]<=Ai*x<=AU[i] + * AL[i]=-INF => one-sided constraint Ai*x<=AU[i] + * AU[i]=+INF => one-sided constraint AL[i]<=Ai*x + * AL[i]=-INF, AU[i]=+INF => constraint is ignored + K - number of equality/inequality constraints, K>=0. If K=0 + is specified, A, AL, AU are ignored. + + -- ALGLIB -- + Copyright 01.11.2019 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetlc2mixed( + minqpstate state, + sparsematrix sparsea, + ae_int_t ksparse, + real_2d_array densea, + ae_int_t kdense, + real_1d_array al, + real_1d_array au, + const xparams _params = alglib::xdefault); + +
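+
+The following sketch is illustrative rather than an official sample: the
+split between one sparse row (x0+x1<=2) and one dense row (x0>=2) is
+arbitrary, the -inf/+inf entries in AL/AU follow the conventions above, and
+it assumes the mixed two-sided API works with the BLEIC solver shown in the
+examples below.
+
+#include "stdafx.h"
+#include <stdio.h>
+#include "optimization.h"
+
+using namespace alglib;
+
+int main(int argc, char **argv)
+{
+    // minimize f(x) = 0.5*(2*x0^2+2*x1^2) - 6*x0 - 4*x1
+    // subject to x0+x1<=2 (sparse row) and x0>=2 (dense row);
+    // both constraints are active at the minimizer [2,0]
+    real_2d_array a = "[[2,0],[0,2]]";
+    real_1d_array b = "[-6,-4]";
+    real_1d_array s = "[1,1]";
+    sparsematrix sparsec;
+    real_2d_array densec = "[[1,0]]";
+    real_1d_array al = "[-inf,2.0]";   // first element: sparse row, second: dense row
+    real_1d_array au = "[2.0,+inf]";
+    real_1d_array x;
+    minqpstate state;
+    minqpreport rep;
+
+    sparsecreate(1, 2, 0, sparsec);    // 1x2 sparse constraint matrix
+    sparseset(sparsec, 0, 0, 1.0);
+    sparseset(sparsec, 0, 1, 1.0);
+
+    minqpcreate(2, state);
+    minqpsetquadraticterm(state, a);
+    minqpsetlinearterm(state, b);
+    minqpsetscale(state, s);
+    minqpsetlc2mixed(state, sparsec, 1, densec, 1, al, au);
+    minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);
+    minqpoptimize(state);
+    minqpresults(state, x, rep);
+    printf("%s\n", x.tostring(1).c_str());   // expected: [2.0,0.0]
+    return 0;
+}
+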
    + +
    +
/*************************************************************************
+This function sets mixed linear constraints, which include a set of dense
+rows, and a set of sparse rows.
+
+This function overrides results of previous calls to minqpsetlc(),
+minqpsetlcsparse() and minqpsetlcmixed().
+
+This function may be useful if the constraint matrix includes a large
+number of both types of rows - dense and sparse. If you have just a few
+sparse rows, you may represent them in dense format without losing
+performance. Similarly, if you have just a few dense rows, you may store
+them in sparse format with almost the same performance.
+
+INPUT PARAMETERS:
+ State - structure previously allocated with MinQPCreate call.
+ SparseC - linear constraints, sparse matrix with dimensions EXACTLY
+ EQUAL TO [SparseK,N+1]. Each row of C represents one
+ constraint, either equality or inequality (see below):
+ * first N elements correspond to coefficients,
+ * last element corresponds to the right part.
+ All elements of C (including right part) must be finite.
+ SparseCT- type of sparse constraints, array[SparseK]:
+ * if SparseCT[i]>0, then I-th constraint is SparseC[i,*]*x >= SparseC[i,n+1]
+ * if SparseCT[i]=0, then I-th constraint is SparseC[i,*]*x = SparseC[i,n+1]
+ * if SparseCT[i]<0, then I-th constraint is SparseC[i,*]*x <= SparseC[i,n+1]
+ SparseK - number of sparse equality/inequality constraints, SparseK>=0
+ DenseC - dense linear constraints, array[DenseK,N+1].
+ Each row of DenseC represents one constraint, either equality
+ or inequality (see below):
+ * first N elements correspond to coefficients,
+ * last element corresponds to the right part.
+ All elements of DenseC (including right part) must be finite.
+ DenseCT - type of dense constraints, array[DenseK]:
+ * if DenseCT[i]>0, then I-th constraint is DenseC[i,*]*x >= DenseC[i,n+1]
+ * if DenseCT[i]=0, then I-th constraint is DenseC[i,*]*x = DenseC[i,n+1]
+ * if DenseCT[i]<0, then I-th constraint is DenseC[i,*]*x <= DenseC[i,n+1]
+ DenseK - number of dense equality/inequality constraints, DenseK>=0
+
+NOTE 1: linear (non-box) constraints are satisfied only approximately -
+ there always exists some violation due to numerical errors and
+ algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP
+ solver is less precise).
+
+NOTE 2: due to backward compatibility reasons SparseC can be larger than
+ [SparseK,N+1]. In this case only the leading [SparseK,N+1] submatrix
+ will be used. However, the rest of ALGLIB has more strict
+ requirements on the input size, so we recommend you to pass a sparse
+ term whose size exactly matches the algorithm's expectations.
+
+ -- ALGLIB --
+ Copyright 22.08.2016 by Bochkanov Sergey
+*************************************************************************/
+
    void alglib::minqpsetlcmixed( + minqpstate state, + sparsematrix sparsec, + integer_1d_array sparsect, + ae_int_t sparsek, + real_2d_array densec, + integer_1d_array densect, + ae_int_t densek, + const xparams _params = alglib::xdefault); + +
    + +
    +
/*************************************************************************
+This function provides a legacy API for specification of mixed dense/sparse
+linear constraints.
+
+The new conventions used by ALGLIB since release 3.16.0 state that the set
+of sparse constraints comes first, followed by the set of dense ones. This
+convention is essential when you deal with things like the order of
+Lagrange multipliers.
+
+However, the legacy API accepted mixed constraints in the reverse order.
+This function is here to simplify life for code relying on the legacy API:
+it accepts constraints in the old order and passes them to the new API in
+the correct order.
+
+ -- ALGLIB --
+ Copyright 01.11.2019 by Bochkanov Sergey
+*************************************************************************/
+
    void alglib::minqpsetlcmixedlegacy( + minqpstate state, + real_2d_array densec, + integer_1d_array densect, + ae_int_t densek, + sparsematrix sparsec, + integer_1d_array sparsect, + ae_int_t sparsek, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function sets sparse linear constraints for QP optimizer. + +This function overrides results of previous calls to minqpsetlc(), +minqpsetlcsparse() and minqpsetlcmixed(). After call to this function +all non-box constraints are dropped, and you have only those constraints +which were specified in the present call. + +If you want to specify mixed (with dense and sparse terms) linear +constraints, you should call minqpsetlcmixed(). + +INPUT PARAMETERS: + State - structure previously allocated with MinQPCreate call. + C - linear constraints, sparse matrix with dimensions at + least [K,N+1]. If matrix has larger size, only leading + Kx(N+1) rectangle is used. + Each row of C represents one constraint, either equality + or inequality (see below): + * first N elements correspond to coefficients, + * last element corresponds to the right part. + All elements of C (including right part) must be finite. + CT - type of constraints, array[K]: + * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] + * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] + * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] + K - number of equality/inequality constraints, K>=0 + +NOTE 1: linear (non-bound) constraints are satisfied only approximately - + there always exists some violation due to numerical errors and + algorithmic limitations (BLEIC-QP solver is most precise, AUL-QP + solver is less precise). + + -- ALGLIB -- + Copyright 22.08.2016 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetlcsparse( + minqpstate state, + sparsematrix c, + integer_1d_array ct, + ae_int_t k, + const xparams _params = alglib::xdefault); + +
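+
+A brief illustrative sketch (not an official sample): it stores the single
+constraint x0+x1<=2 as a sparse row with the right part in the last column,
+and converts the matrix to CRS before the call as a precaution - whether
+other storage formats are accepted here is not stated above.
+
+#include "stdafx.h"
+#include <stdio.h>
+#include "optimization.h"
+
+using namespace alglib;
+
+int main(int argc, char **argv)
+{
+    // minimize f(x) = 0.5*(2*x0^2+2*x1^2) - 6*x0 - 4*x1 subject to x0+x1<=2
+    real_2d_array a = "[[2,0],[0,2]]";
+    real_1d_array b = "[-6,-4]";
+    real_1d_array s = "[1,1]";
+    sparsematrix c;
+    integer_1d_array ct = "[-1]";      // CT[i]<0 means C[i,*]*x <= right part
+    real_1d_array x;
+    minqpstate state;
+    minqpreport rep;
+
+    sparsecreate(1, 3, 0, c);          // K=1 row, N+1=3 columns
+    sparseset(c, 0, 0, 1.0);           // coefficient of x0
+    sparseset(c, 0, 1, 1.0);           // coefficient of x1
+    sparseset(c, 0, 2, 2.0);           // right part
+    sparseconverttocrs(c);             // precaution, see note above
+
+    minqpcreate(2, state);
+    minqpsetquadraticterm(state, a);
+    minqpsetlinearterm(state, b);
+    minqpsetscale(state, s);
+    minqpsetlcsparse(state, c, ct, 1);
+    minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);
+    minqpoptimize(state);
+    minqpresults(state, x, rep);
+    printf("%s\n", x.tostring(1).c_str());   // expected: [1.5,0.5]
+    return 0;
+}
+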
    + +
    +
    /************************************************************************* +This function sets linear term for QP solver. + +By default, linear term is zero. + +INPUT PARAMETERS: + State - structure which stores algorithm state + B - linear term, array[N]. + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetlinearterm( + minqpstate state, + real_1d_array b, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  [4]  [5]  

    + +
    +
/*************************************************************************
+This function sets the origin for the QP solver. By default, the following
+QP program is solved:
+
+ min(0.5*x'*A*x+b'*x)
+
+This function allows solving a different problem:
+
+ min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin))
+
+Specification of a non-zero origin affects the function being minimized,
+but not the constraints: box and linear constraints are still applied to x
+itself, without the origin shift.
+
+INPUT PARAMETERS:
+ State - structure which stores algorithm state
+ XOrigin - origin, array[N].
+
+ -- ALGLIB --
+ Copyright 11.01.2011 by Bochkanov Sergey
+*************************************************************************/
+
    void alglib::minqpsetorigin( + minqpstate state, + real_1d_array xorigin, + const xparams _params = alglib::xdefault); + +
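+
+A minimal illustrative sketch (not an official sample): with b=0 and a
+positive definite A, the shifted problem min 0.5*(x-x_origin)'*A*(x-x_origin)
+is minimized exactly at the origin vector, which makes the effect of this
+call easy to verify.
+
+#include "stdafx.h"
+#include <stdio.h>
+#include "optimization.h"
+
+using namespace alglib;
+
+int main(int argc, char **argv)
+{
+    // minimize 0.5*(x-x_origin)'*A*(x-x_origin) with A=diag(2,2), x_origin=[3,2]
+    real_2d_array a = "[[2,0],[0,2]]";
+    real_1d_array xorigin = "[3,2]";
+    real_1d_array s = "[1,1]";
+    real_1d_array x;
+    minqpstate state;
+    minqpreport rep;
+
+    minqpcreate(2, state);
+    minqpsetquadraticterm(state, a);
+    minqpsetorigin(state, xorigin);
+    minqpsetscale(state, s);
+    minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true);
+    minqpoptimize(state);
+    minqpresults(state, x, rep);
+    printf("%s\n", x.tostring(2).c_str());   // expected: [3,2]
+    return 0;
+}
+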
    + +
    +
    /************************************************************************* +This function sets dense quadratic term for QP solver. By default, +quadratic term is zero. + +IMPORTANT: + +This solver minimizes following function: + f(x) = 0.5*x'*A*x + b'*x. +Note that quadratic term has 0.5 before it. So if you want to minimize + f(x) = x^2 + x +you should rewrite your problem as follows: + f(x) = 0.5*(2*x^2) + x +and your matrix A will be equal to [[2.0]], not to [[1.0]] + +INPUT PARAMETERS: + State - structure which stores algorithm state + A - matrix, array[N,N] + IsUpper - (optional) storage type: + * if True, symmetric matrix A is given by its upper + triangle, and the lower triangle isn't used + * if False, symmetric matrix A is given by its lower + triangle, and the upper triangle isn't used + * if not given, both lower and upper triangles must be + filled. + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetquadraticterm( + minqpstate state, + real_2d_array a, + const xparams _params = alglib::xdefault); +void alglib::minqpsetquadraticterm( + minqpstate state, + real_2d_array a, + bool isupper, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  [4]  

    + +
    +
    /************************************************************************* +This function sets sparse quadratic term for QP solver. By default, +quadratic term is zero. This function overrides previous calls to +minqpsetquadraticterm() or minqpsetquadratictermsparse(). + +NOTE: dense solvers like DENSE-AUL-QP or DENSE-IPM-QP will convert this + matrix to dense storage anyway. + +IMPORTANT: + +This solver minimizes following function: + f(x) = 0.5*x'*A*x + b'*x. +Note that quadratic term has 0.5 before it. So if you want to minimize + f(x) = x^2 + x +you should rewrite your problem as follows: + f(x) = 0.5*(2*x^2) + x +and your matrix A will be equal to [[2.0]], not to [[1.0]] + +INPUT PARAMETERS: + State - structure which stores algorithm state + A - matrix, array[N,N] + IsUpper - (optional) storage type: + * if True, symmetric matrix A is given by its upper + triangle, and the lower triangle isn't used + * if False, symmetric matrix A is given by its lower + triangle, and the upper triangle isn't used + * if not given, both lower and upper triangles must be + filled. + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetquadratictermsparse( + minqpstate state, + sparsematrix a, + bool isupper, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This function sets scaling coefficients. + +ALGLIB optimizers use scaling matrices to test stopping conditions (step +size and gradient are scaled before comparison with tolerances) and as +preconditioner. + +Scale of the I-th variable is a translation invariant measure of: +a) "how large" the variable is +b) how large the step should be to make significant changes in the + function + +If you do not know how to choose scales of your variables, you can: +* read www.alglib.net/optimization/scaling.php article +* use minqpsetscaleautodiag(), which calculates scale using diagonal of + the quadratic term: S is set to 1/sqrt(diag(A)), which works well + sometimes. + +INPUT PARAMETERS: + State - structure stores algorithm state + S - array[N], non-zero scaling coefficients + S[i] may be negative, sign doesn't matter. + + -- ALGLIB -- + Copyright 14.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetscale( + minqpstate state, + real_1d_array s, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function sets automatic evaluation of variable scaling. + +IMPORTANT: this function works only for matrices with positive diagonal + elements! Zero or negative elements will result in -9 error + code being returned. Specify scale vector manually with + minqpsetscale() in such cases. + +ALGLIB optimizers use scaling matrices to test stopping conditions (step +size and gradient are scaled before comparison with tolerances) and as +preconditioner. + +The best way to set scaling is to manually specify variable scales. +However, sometimes you just need quick-and-dirty solution - either when +you perform fast prototyping, or when you know your problem well and you +are 100% sure that this quick solution is robust enough in your case. + +One such solution is to evaluate scale of I-th variable as 1/Sqrt(A[i,i]), +where A[i,i] is an I-th diagonal element of the quadratic term. + +Such approach works well sometimes, but you have to be careful here. + +INPUT PARAMETERS: + State - structure stores algorithm state + + -- ALGLIB -- + Copyright 26.12.2017 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetscaleautodiag( + minqpstate state, + const xparams _params = alglib::xdefault); + +
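+
+A minimal illustrative sketch (not an official sample): the quadratic term
+has strictly positive diagonal entries, so automatic scaling is applicable;
+the problem itself is the unconstrained convex QP from the examples below.
+
+#include "stdafx.h"
+#include <stdio.h>
+#include "optimization.h"
+
+using namespace alglib;
+
+int main(int argc, char **argv)
+{
+    // minimize f(x) = 0.5*(2*x0^2+2*x1^2) - 6*x0 - 4*x1, minimizer [3,2]
+    real_2d_array a = "[[2,0],[0,2]]";
+    real_1d_array b = "[-6,-4]";
+    real_1d_array x;
+    minqpstate state;
+    minqpreport rep;
+
+    minqpcreate(2, state);
+    minqpsetquadraticterm(state, a);
+    minqpsetlinearterm(state, b);
+    minqpsetscaleautodiag(state);                 // scales set to 1/sqrt(diag(A))
+    minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true);
+    minqpoptimize(state);
+    minqpresults(state, x, rep);
+    printf("%s\n", x.tostring(2).c_str());        // expected: [3,2]
+    return 0;
+}
+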
    + +
    +
    /************************************************************************* +This function sets starting point for QP solver. It is useful to have good +initial approximation to the solution, because it will increase speed of +convergence and identification of active constraints. + +NOTE: interior point solvers ignore initial point provided by user. + +INPUT PARAMETERS: + State - structure which stores algorithm state + X - starting point, array[N]. + + -- ALGLIB -- + Copyright 11.01.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::minqpsetstartingpoint( + minqpstate state, + real_1d_array x, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  [4]  [5]  

    + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of F(x0,x1) = x0^2 + x1^2 -6*x0 - 4*x1
    +    // subject to bound constraints 0<=x0<=2.5, 0<=x1<=2.5
    +    //
    +    // Exact solution is [x0,x1] = [2.5,2]
    +    //
    +    // We provide algorithm with starting point. With such small problem good starting
    +    // point is not really necessary, but with high-dimensional problem it can save us
    +    // a lot of time.
    +    //
    +    // Several QP solvers are tried: QuickQP, BLEIC, DENSE-AUL.
    +    //
    +    // IMPORTANT: this solver minimizes  following  function:
    +    //     f(x) = 0.5*x'*A*x + b'*x.
    +    // Note that quadratic term has 0.5 before it. So if you want to minimize
    +    // quadratic function, you should rewrite it in such way that quadratic term
    +    // is multiplied by 0.5 too.
    +    // For example, our function is f(x)=x0^2+x1^2+..., but we rewrite it as 
    +    //     f(x) = 0.5*(2*x0^2+2*x1^2) + ....
    +    // and pass diag(2,2) as quadratic term - NOT diag(1,1)!
    +    //
    +    real_2d_array a = "[[2,0],[0,2]]";
    +    real_1d_array b = "[-6,-4]";
    +    real_1d_array x0 = "[0,1]";
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[0.0,0.0]";
    +    real_1d_array bndu = "[2.5,2.5]";
    +    real_1d_array x;
    +    minqpstate state;
    +    minqpreport rep;
    +
    +    // create solver, set quadratic/linear terms
    +    minqpcreate(2, state);
    +    minqpsetquadraticterm(state, a);
    +    minqpsetlinearterm(state, b);
    +    minqpsetstartingpoint(state, x0);
    +    minqpsetbc(state, bndl, bndu);
    +
    +    // Set scale of the parameters.
    +    // It is strongly recommended that you set scale of your variables.
    +    // Knowing their scales is essential for evaluation of stopping criteria
    +    // and for preconditioning of the algorithm steps.
    +    // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php
    +    //
    +    // NOTE: for convex problems you may try using minqpsetscaleautodiag()
    +    //       which automatically determines variable scales.
    +    minqpsetscale(state, s);
    +
    +    //
    +    // Solve problem with QuickQP solver.
    +    //
    +    // This solver is intended for medium and large-scale problems with box
    +    // constraints (general linear constraints are not supported).
    +    //
    +    // Default stopping criteria are used, Newton phase is active.
    +    //
    +    minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [2.5,2]
    +
    +    //
    +    // Solve problem with BLEIC-based QP solver.
    +    //
    +    // This solver is intended for problems with moderate (up to 50) number
    +    // of general linear constraints and unlimited number of box constraints.
    +    //
    +    // Default stopping criteria are used.
    +    //
    +    minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [2.5,2]
    +
    +    //
    +    // Solve problem with DENSE-AUL solver.
    +    //
    +    // This solver is optimized for problems with up to several thousands of
    +    // variables and large amount of general linear constraints. Problems with
    +    // less than 50 general linear constraints can be efficiently solved with
    +    // BLEIC, problems with box-only constraints can be solved with QuickQP.
    +    // However, DENSE-AUL will work in any (including unconstrained) case.
    +    //
    +    // Default stopping criteria are used.
    +    //
    +    minqpsetalgodenseaul(state, 1.0e-9, 1.0e+4, 5);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [2.5,2]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of F(x0,x1) = x0^2 + x1^2 -6*x0 - 4*x1
    +    // subject to linear constraint x0+x1<=2
    +    //
    +    // Exact solution is [x0,x1] = [1.5,0.5]
    +    //
    +    // IMPORTANT: this solver minimizes  following  function:
    +    //     f(x) = 0.5*x'*A*x + b'*x.
    +    // Note that quadratic term has 0.5 before it. So if you want to minimize
    +    // quadratic function, you should rewrite it in such way that quadratic term
    +    // is multiplied by 0.5 too.
    +    // For example, our function is f(x)=x0^2+x1^2+..., but we rewrite it as 
    +    //     f(x) = 0.5*(2*x0^2+2*x1^2) + ....
    +    // and pass diag(2,2) as quadratic term - NOT diag(1,1)!
    +    //
    +    real_2d_array a = "[[2,0],[0,2]]";
    +    real_1d_array b = "[-6,-4]";
    +    real_1d_array s = "[1,1]";
    +    real_2d_array c = "[[1.0,1.0,2.0]]";
    +    integer_1d_array ct = "[-1]";
    +    real_1d_array x;
    +    minqpstate state;
    +    minqpreport rep;
    +
    +    // create solver, set quadratic/linear terms
    +    minqpcreate(2, state);
    +    minqpsetquadraticterm(state, a);
    +    minqpsetlinearterm(state, b);
    +    minqpsetlc(state, c, ct);
    +
    +    // Set scale of the parameters.
    +    // It is strongly recommended that you set scale of your variables.
    +    // Knowing their scales is essential for evaluation of stopping criteria
    +    // and for preconditioning of the algorithm steps.
    +    // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php
    +    //
    +    // NOTE: for convex problems you may try using minqpsetscaleautodiag()
    +    //       which automatically determines variable scales.
    +    minqpsetscale(state, s);
    +
    +    //
    +    // Solve problem with BLEIC-based QP solver.
    +    //
    +    // This solver is intended for problems with moderate (up to 50) number
    +    // of general linear constraints and unlimited number of box constraints.
    +    //
    +    // Default stopping criteria are used.
    +    //
    +    minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(1).c_str()); // EXPECTED: [1.500,0.500]
    +
    +    //
    +    // Solve problem with DENSE-AUL solver.
    +    //
    +    // This solver is optimized for problems with up to several thousands of
    +    // variables and large amount of general linear constraints. Problems with
    +    // less than 50 general linear constraints can be efficiently solved with
    +    // BLEIC, problems with box-only constraints can be solved with QuickQP.
    +    // However, DENSE-AUL will work in any (including unconstrained) case.
    +    //
    +    // Default stopping criteria are used.
    +    //
    +    minqpsetalgodenseaul(state, 1.0e-9, 1.0e+4, 5);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(1).c_str()); // EXPECTED: [1.500,0.500]
    +
    +    //
    +    // Solve problem with QuickQP solver.
    +    //
    +    // This solver is intended for medium and large-scale problems with box
    +    // constraints, and...
    +    //
    +    // ...Oops! It does not support general linear constraints, -5 returned as completion code!
    +    //
    +    minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%d\n", int(rep.terminationtype)); // EXPECTED: -5
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of nonconvex function
    +    //     F(x0,x1) = -(x0^2+x1^2)
    +    // subject to constraints x0,x1 in [1.0,2.0]
    +    // Exact solution is [x0,x1] = [2,2].
    +    //
+    // Non-convex problems are harder to solve than convex ones, and they
+    // may have more than one local minimum. However, ALGLIB solvers can deal
+    // with such problems (although they do not guarantee convergence to the
+    // global minimum).
    +    //
    +    // IMPORTANT: this solver minimizes  following  function:
    +    //     f(x) = 0.5*x'*A*x + b'*x.
    +    // Note that quadratic term has 0.5 before it. So if you want to minimize
    +    // quadratic function, you should rewrite it in such way that quadratic term
    +    // is multiplied by 0.5 too.
    +    //
    +    // For example, our function is f(x)=-(x0^2+x1^2), but we rewrite it as 
    +    //     f(x) = 0.5*(-2*x0^2-2*x1^2)
    +    // and pass diag(-2,-2) as quadratic term - NOT diag(-1,-1)!
    +    //
    +    real_2d_array a = "[[-2,0],[0,-2]]";
    +    real_1d_array x0 = "[1,1]";
    +    real_1d_array s = "[1,1]";
    +    real_1d_array bndl = "[1.0,1.0]";
    +    real_1d_array bndu = "[2.0,2.0]";
    +    real_1d_array x;
    +    minqpstate state;
    +    minqpreport rep;
    +
    +    // create solver, set quadratic/linear terms, constraints
    +    minqpcreate(2, state);
    +    minqpsetquadraticterm(state, a);
    +    minqpsetstartingpoint(state, x0);
    +    minqpsetbc(state, bndl, bndu);
    +
    +    // Set scale of the parameters.
    +    // It is strongly recommended that you set scale of your variables.
    +    // Knowing their scales is essential for evaluation of stopping criteria
    +    // and for preconditioning of the algorithm steps.
    +    // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php
    +    //
    +    // NOTE: there also exists minqpsetscaleautodiag() function
    +    //       which automatically determines variable scales; however,
    +    //       it does NOT work for non-convex problems.
    +    minqpsetscale(state, s);
    +
    +    //
    +    // Solve problem with BLEIC-based QP solver.
    +    //
    +    // This solver is intended for problems with moderate (up to 50) number
    +    // of general linear constraints and unlimited number of box constraints.
    +    //
    +    // It may solve non-convex problems as long as they are bounded from
    +    // below under constraints.
    +    //
    +    // Default stopping criteria are used.
    +    //
    +    minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [2,2]
    +
    +    //
    +    // Solve problem with DENSE-AUL solver.
    +    //
    +    // This solver is optimized for problems with up to several thousands of
    +    // variables and large amount of general linear constraints. Problems with
    +    // less than 50 general linear constraints can be efficiently solved with
    +    // BLEIC, problems with box-only constraints can be solved with QuickQP.
    +    // However, DENSE-AUL will work in any (including unconstrained) case.
    +    //
    +    // Algorithm convergence is guaranteed only for convex case, but you may
    +    // expect that it will work for non-convex problems too (because near the
    +    // solution they are locally convex).
    +    //
    +    // Default stopping criteria are used.
    +    //
    +    minqpsetalgodenseaul(state, 1.0e-9, 1.0e+4, 5);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [2,2]
    +
    +    // Hmm... this problem is bounded from below (has solution) only under constraints.
+    // What if we remove them?
    +    //
+    // You may see that the BLEIC algorithm detects unboundedness of the problem:
+    // -4 is returned as the completion code. However, DENSE-AUL is unable to detect
+    // such a situation and will cycle forever (we do not test it here).
    +    real_1d_array nobndl = "[-inf,-inf]";
    +    real_1d_array nobndu = "[+inf,+inf]";
    +    minqpsetbc(state, nobndl, nobndu);
    +    minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%d\n", int(rep.terminationtype)); // EXPECTED: -4
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of F(x0,x1) = x0^2 + x1^2 -6*x0 - 4*x1
    +    //
    +    // Exact solution is [x0,x1] = [3,2]
    +    //
    +    // We provide algorithm with starting point, although in this case
    +    // (dense matrix, no constraints) it can work without such information.
    +    //
    +    // Several QP solvers are tried: QuickQP, BLEIC, DENSE-AUL.
    +    //
    +    // IMPORTANT: this solver minimizes  following  function:
    +    //     f(x) = 0.5*x'*A*x + b'*x.
    +    // Note that quadratic term has 0.5 before it. So if you want to minimize
    +    // quadratic function, you should rewrite it in such way that quadratic term
    +    // is multiplied by 0.5 too.
    +    //
    +    // For example, our function is f(x)=x0^2+x1^2+..., but we rewrite it as 
    +    //     f(x) = 0.5*(2*x0^2+2*x1^2) + .... 
    +    // and pass diag(2,2) as quadratic term - NOT diag(1,1)!
    +    //
    +    real_2d_array a = "[[2,0],[0,2]]";
    +    real_1d_array b = "[-6,-4]";
    +    real_1d_array x0 = "[0,1]";
    +    real_1d_array s = "[1,1]";
    +    real_1d_array x;
    +    minqpstate state;
    +    minqpreport rep;
    +
    +    // create solver, set quadratic/linear terms
    +    minqpcreate(2, state);
    +    minqpsetquadraticterm(state, a);
    +    minqpsetlinearterm(state, b);
    +    minqpsetstartingpoint(state, x0);
    +
    +    // Set scale of the parameters.
    +    // It is strongly recommended that you set scale of your variables.
    +    // Knowing their scales is essential for evaluation of stopping criteria
    +    // and for preconditioning of the algorithm steps.
    +    // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php
    +    //
    +    // NOTE: for convex problems you may try using minqpsetscaleautodiag()
    +    //       which automatically determines variable scales.
    +    minqpsetscale(state, s);
    +
    +    //
    +    // Solve problem with QuickQP solver.
    +    //
    +    // This solver is intended for medium and large-scale problems with box
    +    // constraints (general linear constraints are not supported), but it can
    +    // also be efficiently used on unconstrained problems.
    +    //
    +    // Default stopping criteria are used, Newton phase is active.
    +    //
    +    minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [3,2]
    +
    +    //
    +    // Solve problem with BLEIC-based QP solver.
    +    //
    +    // This solver is intended for problems with moderate (up to 50) number
    +    // of general linear constraints and unlimited number of box constraints.
    +    // Of course, unconstrained problems can be solved too.
    +    //
    +    // Default stopping criteria are used.
    +    //
    +    minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [3,2]
    +
    +    //
    +    // Solve problem with DENSE-AUL solver.
    +    //
    +    // This solver is optimized for problems with up to several thousands of
    +    // variables and large amount of general linear constraints. Problems with
    +    // less than 50 general linear constraints can be efficiently solved with
    +    // BLEIC, problems with box-only constraints can be solved with QuickQP.
    +    // However, DENSE-AUL will work in any (including unconstrained) case.
    +    //
    +    // Default stopping criteria are used.
    +    //
    +    minqpsetalgodenseaul(state, 1.0e-9, 1.0e+4, 5);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [3,2]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "optimization.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates minimization of F(x0,x1) = x0^2 + x1^2 -6*x0 - 4*x1,
    +    // with quadratic term given by sparse matrix structure.
    +    //
    +    // Exact solution is [x0,x1] = [3,2]
    +    //
    +    // We provide algorithm with starting point, although in this case
    +    // (dense matrix, no constraints) it can work without such information.
    +    //
    +    // IMPORTANT: this solver minimizes  following  function:
    +    //     f(x) = 0.5*x'*A*x + b'*x.
    +    // Note that quadratic term has 0.5 before it. So if you want to minimize
    +    // quadratic function, you should rewrite it in such way that quadratic term
    +    // is multiplied by 0.5 too.
    +    //
    +    // For example, our function is f(x)=x0^2+x1^2+..., but we rewrite it as 
    +    //     f(x) = 0.5*(2*x0^2+2*x1^2) + ....
    +    // and pass diag(2,2) as quadratic term - NOT diag(1,1)!
    +    //
    +    sparsematrix a;
    +    real_1d_array b = "[-6,-4]";
    +    real_1d_array x0 = "[0,1]";
    +    real_1d_array s = "[1,1]";
    +    real_1d_array x;
    +    minqpstate state;
    +    minqpreport rep;
    +
    +    // initialize sparsematrix structure
    +    sparsecreate(2, 2, 0, a);
    +    sparseset(a, 0, 0, 2.0);
    +    sparseset(a, 1, 1, 2.0);
    +
    +    // create solver, set quadratic/linear terms
    +    minqpcreate(2, state);
    +    minqpsetquadratictermsparse(state, a, true);
    +    minqpsetlinearterm(state, b);
    +    minqpsetstartingpoint(state, x0);
    +
    +    // Set scale of the parameters.
    +    // It is strongly recommended that you set scale of your variables.
    +    // Knowing their scales is essential for evaluation of stopping criteria
    +    // and for preconditioning of the algorithm steps.
    +    // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php
    +    //
    +    // NOTE: for convex problems you may try using minqpsetscaleautodiag()
    +    //       which automatically determines variable scales.
    +    minqpsetscale(state, s);
    +
    +    //
    +    // Solve problem with BLEIC-based QP solver.
    +    //
    +    // This solver is intended for problems with moderate (up to 50) number
    +    // of general linear constraints and unlimited number of box constraints.
    +    // It also supports sparse problems.
    +    //
    +    // Default stopping criteria are used.
    +    //
    +    minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);
    +    minqpoptimize(state);
    +    minqpresults(state, x, rep);
    +    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [3,2]
    +    return 0;
    +}
    +
    +
    +
    + + +
    +
/*************************************************************************
+Model's errors:
+ * RelCLSError - fraction of misclassified cases.
+ * AvgCE - average cross-entropy
+ * RMSError - root-mean-square error
+ * AvgError - average error
+ * AvgRelError - average relative error
+
+NOTE 1: RelCLSError/AvgCE are zero on regression problems.
+
+NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain
+ errors in prediction of posterior probabilities
+*************************************************************************/
+
    class modelerrors +{ + double relclserror; + double avgce; + double rmserror; + double avgerror; + double avgrelerror; +}; + +
    + +
    +
    /************************************************************************* + +*************************************************************************/ +
    class multilayerperceptron +{ +}; + +
    + +
    +
    /************************************************************************* +Neural network activation function + +INPUT PARAMETERS: + NET - neuron input + K - function index (zero for linear function) + +OUTPUT PARAMETERS: + F - function + DF - its derivative + D2F - its second derivative + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpactivationfunction( + double net, + ae_int_t k, + double& f, + double& df, + double& d2f, + const xparams _params = alglib::xdefault); + +
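+
+A short illustrative sketch (not an official sample): the function index 1
+is assumed to select one of the library's non-linear activations (0 is
+documented above as the linear function); the numeric output is not
+asserted.
+
+#include "stdafx.h"
+#include <stdio.h>
+#include "dataanalysis.h"
+
+using namespace alglib;
+
+int main(int argc, char **argv)
+{
+    // evaluate an activation function together with its first two derivatives
+    double f, df, d2f;
+    mlpactivationfunction(0.5, 1, f, df, d2f);
+    printf("%.4f %.4f %.4f\n", f, df, d2f);
+    return 0;
+}
+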
    + +
    +
    /************************************************************************* +Calculation of all types of errors on subset of dataset. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - original dataset given by sparse matrix; + one sample = one row; + first NIn columns contain inputs, + next NOut columns - desired outputs. + SetSize - real size of XY, SetSize>=0; + Subset - subset of SubsetSize elements, array[SubsetSize]; + SubsetSize- number of elements in Subset[] array: + * if SubsetSize>0, rows of XY with indices Subset[0]... + ...Subset[SubsetSize-1] are processed + * if SubsetSize=0, zeros are returned + * if SubsetSize<0, entire dataset is processed; Subset[] + array is ignored in this case. + +OUTPUT PARAMETERS: + Rep - it contains all type of errors. + + + -- ALGLIB -- + Copyright 04.09.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpallerrorssparsesubset( + multilayerperceptron network, + sparsematrix xy, + ae_int_t setsize, + integer_1d_array subset, + ae_int_t subsetsize, + modelerrors& rep, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Calculation of all types of errors on subset of dataset. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - original dataset; one sample = one row; + first NIn columns contain inputs, + next NOut columns - desired outputs. + SetSize - real size of XY, SetSize>=0; + Subset - subset of SubsetSize elements, array[SubsetSize]; + SubsetSize- number of elements in Subset[] array: + * if SubsetSize>0, rows of XY with indices Subset[0]... + ...Subset[SubsetSize-1] are processed + * if SubsetSize=0, zeros are returned + * if SubsetSize<0, entire dataset is processed; Subset[] + array is ignored in this case. + +OUTPUT PARAMETERS: + Rep - it contains all type of errors. + + -- ALGLIB -- + Copyright 04.09.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpallerrorssubset( + multilayerperceptron network, + real_2d_array xy, + ae_int_t setsize, + integer_1d_array subset, + ae_int_t subsetsize, + modelerrors& rep, + const xparams _params = alglib::xdefault); + +
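+
+A small illustrative sketch (not an official sample): an untrained
+regression network is evaluated on two of the three rows of a tiny dataset,
+so the exact error values are not asserted - the point is the calling
+convention for the Subset/SubsetSize arguments.
+
+#include "stdafx.h"
+#include <stdio.h>
+#include "dataanalysis.h"
+
+using namespace alglib;
+
+int main(int argc, char **argv)
+{
+    // regression network with 2 inputs and 1 output, no hidden layers
+    multilayerperceptron net;
+    modelerrors rep;
+    real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1]]";   // NIn=2 inputs, NOut=1 output per row
+    integer_1d_array subset = "[0,2]";                // process rows 0 and 2 only
+    mlpcreate0(2, 1, net);
+    mlpallerrorssubset(net, xy, 3, subset, 2, rep);
+    printf("%.3f %.3f\n", rep.rmserror, rep.avgerror);
+    return 0;
+}
+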
    + +
    +
    /************************************************************************* +Average cross-entropy (in bits per element) on the test set. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. + +RESULT: +CrossEntropy/(NPoints*LN(2)). +Zero if network solves regression task. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 08.01.2009 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlpavgce( + multilayerperceptron network, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
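+
+A small illustrative sketch (not an official sample): it only demonstrates
+the classification dataset layout described above (class index in the last
+column); the network is untrained, so the returned cross-entropy is merely
+expected to be close to one bit per element rather than a meaningful
+quality figure.
+
+#include "stdafx.h"
+#include <stdio.h>
+#include "dataanalysis.h"
+
+using namespace alglib;
+
+int main(int argc, char **argv)
+{
+    // classifier with 2 inputs and 2 classes; last column stores the class number
+    multilayerperceptron net;
+    real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
+    mlpcreatec0(2, 2, net);
+    printf("%.3f\n", mlpavgce(net, xy, 4));
+    return 0;
+}
+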
    + +
    +
    /************************************************************************* +Average cross-entropy (in bits per element) on the test set given by +sparse matrix. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format. This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. + NPoints - points count, >=0. + +RESULT: +CrossEntropy/(NPoints*LN(2)). +Zero if network solves regression task. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 9.08.2012 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlpavgcesparse( + multilayerperceptron network, + sparsematrix xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Average absolute error on the test set. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. + +RESULT: +Its meaning for regression task is obvious. As for classification task, it +means average error when estimating posterior probabilities. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 11.03.2008 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlpavgerror( + multilayerperceptron network, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Average absolute error on the test set given by sparse matrix. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format. This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. + NPoints - points count, >=0. + +RESULT: +Its meaning for regression task is obvious. As for classification task, it +means average error when estimating posterior probabilities. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 09.08.2012 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlpavgerrorsparse( + multilayerperceptron network, + sparsematrix xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Average relative error on the test set. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. + +RESULT: +Its meaning for regression task is obvious. As for classification task, it +means average relative error when estimating posterior probability of +belonging to the correct class. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 11.03.2008 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlpavgrelerror( + multilayerperceptron network, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Average relative error on the test set given by sparse matrix. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format. This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. + NPoints - points count, >=0. + +RESULT: +Its meaning for regression task is obvious. As for classification task, it +means average relative error when estimating posterior probability of +belonging to the correct class. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 09.08.2012 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlpavgrelerrorsparse( + multilayerperceptron network, + sparsematrix xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Classification error of the neural network on dataset. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. + +RESULT: + classification error (number of misclassified cases) + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::mlpclserror( + multilayerperceptron network, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Copying of neural network + +INPUT PARAMETERS: + Network1 - original + +OUTPUT PARAMETERS: + Network2 - copy + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcopy( + multilayerperceptron network1, + multilayerperceptron& network2, + const xparams _params = alglib::xdefault); + +
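+
+A small illustrative sketch (not an official sample): it assumes the usual
+mlpprocess() forward-pass routine from this unit in order to show that the
+copy produces the same outputs as the original on the same input.
+
+#include "stdafx.h"
+#include <stdio.h>
+#include "dataanalysis.h"
+
+using namespace alglib;
+
+int main(int argc, char **argv)
+{
+    multilayerperceptron net1, net2;
+    real_1d_array in = "[1,2]";
+    real_1d_array out1;
+    real_1d_array out2;
+    mlpcreate1(2, 3, 1, net1);       // 2 inputs, 3 hidden neurons, 1 output
+    mlpcopy(net1, net2);             // net2 becomes an independent copy of net1
+    mlpprocess(net1, in, out1);      // assumed forward-pass call
+    mlpprocess(net2, in, out2);
+    printf("%s %s\n", out1.tostring(3).c_str(), out2.tostring(3).c_str()); // identical
+    return 0;
+}
+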
    + +
    +
/*************************************************************************
+This function copies tunable parameters (weights/means/sigmas) from one
+network to another one with the same architecture. It performs some
+rudimentary checks that the architectures are the same, and throws an
+exception if the check fails.
+
+It is intended for fast copying of states between two networks which are
+known to have the same geometry.
+
+INPUT PARAMETERS:
+ Network1 - source, must be correctly initialized
+ Network2 - target, must have same architecture
+
+OUTPUT PARAMETERS:
+ Network2 - network state is copied from source to target
+
+ -- ALGLIB --
+ Copyright 20.06.2013 by Bochkanov Sergey
+*************************************************************************/
+
    void alglib::mlpcopytunableparameters( + multilayerperceptron network1, + multilayerperceptron network2, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Creates neural network with NIn inputs, NOut outputs, without hidden +layers, with linear output layer. Network weights are filled with small +random values. + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreate0( + ae_int_t nin, + ae_int_t nout, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Same as MLPCreate0, but with one hidden layer (NHid neurons) with +non-linear activation function. Output layer is linear. + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreate1( + ae_int_t nin, + ae_int_t nhid, + ae_int_t nout, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
/*************************************************************************
+Same as MLPCreate0, but with two hidden layers (NHid1 and NHid2 neurons)
+with non-linear activation function. Output layer is linear.
+
+ -- ALGLIB --
+ Copyright 04.11.2007 by Bochkanov Sergey
+*************************************************************************/
+
    void alglib::mlpcreate2( + ae_int_t nin, + ae_int_t nhid1, + ae_int_t nhid2, + ae_int_t nout, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
/*************************************************************************
+Creates neural network with NIn inputs, NOut outputs, without hidden
+layers, with non-linear output layer. Network weights are filled with small
+random values.
+
+Activation function of the output layer takes values:
+
+ (B, +INF), if D>=0
+
+or
+
+ (-INF, B), if D<0.
+
+
+ -- ALGLIB --
+ Copyright 30.03.2008 by Bochkanov Sergey
+*************************************************************************/
+
    void alglib::mlpcreateb0( + ae_int_t nin, + ae_int_t nout, + double b, + double d, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Same as MLPCreateB0 but with non-linear hidden layer. + + -- ALGLIB -- + Copyright 30.03.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreateb1( + ae_int_t nin, + ae_int_t nhid, + ae_int_t nout, + double b, + double d, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Same as MLPCreateB0 but with two non-linear hidden layers. + + -- ALGLIB -- + Copyright 30.03.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreateb2( + ae_int_t nin, + ae_int_t nhid1, + ae_int_t nhid2, + ae_int_t nout, + double b, + double d, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Creates classifier network with NIn inputs and NOut possible classes. +Network contains no hidden layers and a linear output layer with SOFTMAX- +normalization (so outputs sum up to 1.0 and converge to posterior +probabilities). + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreatec0( + ae_int_t nin, + ae_int_t nout, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
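A short classifier sketch (not from the upstream manual; assumes dataanalysis.h, and the input values are made up). It creates a softmax-normalized network with 4 inputs and 3 classes and evaluates posterior probabilities for one input vector:

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreatec0(4, 3, net);          // 4 inputs, 3 classes, SOFTMAX output
        printf("classifier: %s\n", mlpissoftmax(net) ? "yes" : "no");

        real_1d_array x = "[0.1, -0.2, 0.3, 0.0]";
        real_1d_array y;
        mlpprocessi(net, x, y);          // y[0..2]: posterior probabilities, sum to 1
        printf("%.3f %.3f %.3f\n", y[0], y[1], y[2]);
        return 0;
    }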
    + +
    +
    /************************************************************************* +Same as MLPCreateC0, but with one non-linear hidden layer. + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreatec1( + ae_int_t nin, + ae_int_t nhid, + ae_int_t nout, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Same as MLPCreateC0, but with two non-linear hidden layers. + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreatec2( + ae_int_t nin, + ae_int_t nhid1, + ae_int_t nhid2, + ae_int_t nout, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Creates neural network with NIn inputs, NOut outputs, without hidden +layers and with a non-linear output layer. Network weights are filled with small +random values. Activation function of the output layer takes values [A,B]. + + -- ALGLIB -- + Copyright 30.03.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreater0( + ae_int_t nin, + ae_int_t nout, + double a, + double b, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Same as MLPCreateR0, but with non-linear hidden layer. + + -- ALGLIB -- + Copyright 30.03.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreater1( + ae_int_t nin, + ae_int_t nhid, + ae_int_t nout, + double a, + double b, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Same as MLPCreateR0, but with two non-linear hidden layers. + + -- ALGLIB -- + Copyright 30.03.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreater2( + ae_int_t nin, + ae_int_t nhid1, + ae_int_t nhid2, + ae_int_t nout, + double a, + double b, + multilayerperceptron& network, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Error of the neural network on dataset. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. + +RESULT: + sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs, the following +dataset format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses classes, the +following dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlperror( + multilayerperceptron network, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
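A small sketch of computing the dataset error (not from the upstream manual; assumes dataanalysis.h and uses a made-up 4-sample regression dataset in the dense NPoints*(NIn+NOut) layout described above):

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 5, 1, net);        // regression network: 2 inputs, 1 output

        // 4 samples, each row = [in0, in1, desired_out]
        real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        printf("sum-of-squares error: %.6f\n", mlperror(net, xy, 4));
        return 0;
    }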
    + +
    +
/************************************************************************* +Natural error function for neural network, internal subroutine. + +NOTE: this function is single-threaded. Unlike other error functions, it +receives no speed-up from being executed in SMP mode. + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlperrorn( + multilayerperceptron network, + real_2d_array xy, + ae_int_t ssize, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Error of the neural network on dataset given by sparse matrix. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network + XY - training set, see below for information on the + training set format. This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. + NPoints - points count, >=0 + +RESULT: + sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs, the following +dataset format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses classes, the +following dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 23.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlperrorsparse( + multilayerperceptron network, + sparsematrix xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Error of the neural network on subset of sparse dataset. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format. This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. + SetSize - real size of XY, SetSize>=0; + it is used when SubsetSize<0; + Subset - subset of SubsetSize elements, array[SubsetSize]; + SubsetSize- number of elements in Subset[] array: + * if SubsetSize>0, rows of XY with indices Subset[0]... + ...Subset[SubsetSize-1] are processed + * if SubsetSize=0, zeros are returned + * if SubsetSize<0, entire dataset is processed; Subset[] + array is ignored in this case. + +RESULT: + sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs, the following +dataset format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses classes, the +following dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 04.09.2012 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlperrorsparsesubset( + multilayerperceptron network, + sparsematrix xy, + ae_int_t setsize, + integer_1d_array subset, + ae_int_t subsetsize, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Error of the neural network on subset of dataset. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format; + SetSize - real size of XY, SetSize>=0; + Subset - subset of SubsetSize elements, array[SubsetSize]; + SubsetSize- number of elements in Subset[] array: + * if SubsetSize>0, rows of XY with indices Subset[0]... + ...Subset[SubsetSize-1] are processed + * if SubsetSize=0, zeros are returned + * if SubsetSize<0, entire dataset is processed; Subset[] + array is ignored in this case. + +RESULT: + sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs, the following +dataset format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses classes, the +following dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 04.09.2012 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlperrorsubset( + multilayerperceptron network, + real_2d_array xy, + ae_int_t setsize, + integer_1d_array subset, + ae_int_t subsetsize, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function returns offset/scaling coefficients for I-th input of the +network. + +INPUT PARAMETERS: + Network - network + I - input index + +OUTPUT PARAMETERS: + Mean - mean term + Sigma - sigma term, guaranteed to be nonzero. + +I-th input is passed through linear transformation + IN[i] = (IN[i]-Mean)/Sigma +before feeding to the network + + -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpgetinputscaling( + multilayerperceptron network, + ae_int_t i, + double& mean, + double& sigma, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Returns number of inputs. + + -- ALGLIB -- + Copyright 19.10.2011 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::mlpgetinputscount( + multilayerperceptron network, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function returns total number of layers (including input, hidden and +output layers). + + -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::mlpgetlayerscount( + multilayerperceptron network, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function returns size of K-th layer. + +K=0 corresponds to input layer, K=CNT-1 corresponds to output layer. + +Size of the output layer is always equal to the number of outputs, although +when we have softmax-normalized network, last neuron doesn't have any +connections - it is just zero. + + -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::mlpgetlayersize( + multilayerperceptron network, + ae_int_t k, + const xparams _params = alglib::xdefault); + +
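An introspection sketch (not from the upstream manual; dataanalysis.h assumed, layer sizes arbitrary). It walks over all layers of a two-hidden-layer network and prints their sizes:

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate2(4, 8, 6, 2, net);            // layers: 4 -> 8 -> 6 -> 2
        ae_int_t cnt = mlpgetlayerscount(net);  // includes input and output layers
        for (ae_int_t k = 0; k < cnt; k++)
            printf("layer %d: %d neurons\n", int(k), int(mlpgetlayersize(net, k)));
        return 0;
    }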
    + +
    +
/************************************************************************* +This function returns information about the I-th neuron of the K-th layer + +INPUT PARAMETERS: + Network - network + K - layer index + I - neuron index (within layer) + +OUTPUT PARAMETERS: + FKind - activation function type (used by MLPActivationFunction()) + this value is zero for input or linear neurons + Threshold - also called offset, bias + zero for input neurons + +NOTE: this function throws an exception if the layer or neuron with the +given index does not exist. + + -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpgetneuroninfo( + multilayerperceptron network, + ae_int_t k, + ae_int_t i, + ae_int_t& fkind, + double& threshold, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +This function returns offset/scaling coefficients for I-th output of the +network. + +INPUT PARAMETERS: + Network - network + I - output index + +OUTPUT PARAMETERS: + Mean - mean term + Sigma - sigma term, guaranteed to be nonzero. + +I-th output is passed through linear transformation + OUT[i] = OUT[i]*Sigma+Mean +before returning it to user. In case we have SOFTMAX-normalized network, +we return (Mean,Sigma)=(0.0,1.0). + + -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpgetoutputscaling( + multilayerperceptron network, + ae_int_t i, + double& mean, + double& sigma, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Returns number of outputs. + + -- ALGLIB -- + Copyright 19.10.2011 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::mlpgetoutputscount( + multilayerperceptron network, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +This function returns information about connection from I0-th neuron of +K0-th layer to I1-th neuron of K1-th layer. + +INPUT PARAMETERS: + Network - network + K0 - layer index + I0 - neuron index (within layer) + K1 - layer index + I1 - neuron index (within layer) + +RESULT: + connection weight (zero for non-existent connections) + +This function: +1. throws an exception if a layer or neuron with the given index does not exist. +2. returns zero if the neurons exist, but there is no connection between them + + -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlpgetweight( + multilayerperceptron network, + ae_int_t k0, + ae_int_t i0, + ae_int_t k1, + ae_int_t i1, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Returns number of weights. + + -- ALGLIB -- + Copyright 19.10.2011 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::mlpgetweightscount( + multilayerperceptron network, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Gradient calculation + +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + X - input vector, length of array must be at least NIn + DesiredY- desired outputs, length of array must be at least NOut + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. + +OUTPUT PARAMETERS: + E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) + Grad - gradient of E with respect to weights of network, array[WCount] + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpgrad( + multilayerperceptron network, + real_1d_array x, + real_1d_array desiredy, + double& e, + real_1d_array& grad, + const xparams _params = alglib::xdefault); + +
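A single-sample gradient sketch (not from the upstream manual; dataanalysis.h assumed, data made up). Grad does not need to be preallocated, since the function reallocates it when it is too small:

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 5, 1, net);

        real_1d_array x        = "[0.0, 1.0]";   // one input vector
        real_1d_array desiredy = "[1.0]";        // its desired output
        real_1d_array grad;                      // reallocated by mlpgrad as needed
        double e;
        mlpgrad(net, x, desiredy, e, grad);
        printf("E=%.6f, dE/dw[0]=%.6f\n", e, grad[0]);
        return 0;
    }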
    + +
    +
    /************************************************************************* +Batch gradient calculation for a set of inputs/outputs + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - original dataset in dense format; one sample = one row: + * first NIn columns contain inputs, + * for regression problem, next NOut columns store + desired outputs. + * for classification problem, next column (just one!) + stores class number. + SSize - number of elements in XY + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. + +OUTPUT PARAMETERS: + E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) + Grad - gradient of E with respect to weights of network, array[WCount] + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpgradbatch( + multilayerperceptron network, + real_2d_array xy, + ae_int_t ssize, + double& e, + real_1d_array& grad, + const xparams _params = alglib::xdefault); + +
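A batch-gradient sketch over a small dense dataset (not from the upstream manual; dataanalysis.h assumed, dataset values made up):

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 5, 1, net);

        // dense regression dataset, one row = [in0, in1, desired_out]
        real_2d_array xy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
        real_1d_array grad;
        double e;
        mlpgradbatch(net, xy, 4, e, grad);   // gradient accumulated over all 4 rows
        printf("E=%.6f, gradient has %d components\n", e, int(grad.length()));
        return 0;
    }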
    + +
    +
    /************************************************************************* +Batch gradient calculation for a set of inputs/outputs given by sparse +matrices + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - original dataset in sparse format; one sample = one row: + * MATRIX MUST BE STORED IN CRS FORMAT + * first NIn columns contain inputs. + * for regression problem, next NOut columns store + desired outputs. + * for classification problem, next column (just one!) + stores class number. + SSize - number of elements in XY + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. + +OUTPUT PARAMETERS: + E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) + Grad - gradient of E with respect to weights of network, array[WCount] + + -- ALGLIB -- + Copyright 26.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpgradbatchsparse( + multilayerperceptron network, + sparsematrix xy, + ae_int_t ssize, + double& e, + real_1d_array& grad, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Batch gradient calculation for a set of inputs/outputs for a subset of +dataset given by set of indexes. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - original dataset in sparse format; one sample = one row: + * MATRIX MUST BE STORED IN CRS FORMAT + * first NIn columns contain inputs, + * for regression problem, next NOut columns store + desired outputs. + * for classification problem, next column (just one!) + stores class number. + SetSize - real size of XY, SetSize>=0; + Idx - subset of SubsetSize elements, array[SubsetSize]: + * Idx[I] stores row index in the original dataset which is + given by XY. Gradient is calculated with respect to rows + whose indexes are stored in Idx[]. + * Idx[] must store correct indexes; this function throws + an exception in case incorrect index (less than 0 or + larger than rows(XY)) is given + * Idx[] may store indexes in any order and even with + repetitions. + SubsetSize- number of elements in Idx[] array: + * positive value means that subset given by Idx[] is processed + * zero value results in zero gradient + * negative value means that full dataset is processed + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. + +OUTPUT PARAMETERS: + E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) + Grad - gradient of E with respect to weights of network, + array[WCount] + +NOTE: when SubsetSize<0, the entire dataset is processed, just as if + MLPGradBatchSparse() had been called. + + -- ALGLIB -- + Copyright 26.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpgradbatchsparsesubset( + multilayerperceptron network, + sparsematrix xy, + ae_int_t setsize, + integer_1d_array idx, + ae_int_t subsetsize, + double& e, + real_1d_array& grad, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Batch gradient calculation for a subset of dataset + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - original dataset in dense format; one sample = one row: + * first NIn columns contain inputs, + * for regression problem, next NOut columns store + desired outputs. + * for classification problem, next column (just one!) + stores class number. + SetSize - real size of XY, SetSize>=0; + Idx - subset of SubsetSize elements, array[SubsetSize]: + * Idx[I] stores row index in the original dataset which is + given by XY. Gradient is calculated with respect to rows + whose indexes are stored in Idx[]. + * Idx[] must store correct indexes; this function throws + an exception in case incorrect index (less than 0 or + larger than rows(XY)) is given + * Idx[] may store indexes in any order and even with + repetitions. + SubsetSize- number of elements in Idx[] array: + * positive value means that subset given by Idx[] is processed + * zero value results in zero gradient + * negative value means that full dataset is processed + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. + +OUTPUT PARAMETERS: + E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) + Grad - gradient of E with respect to weights of network, + array[WCount] + + -- ALGLIB -- + Copyright 26.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpgradbatchsubset( + multilayerperceptron network, + real_2d_array xy, + ae_int_t setsize, + integer_1d_array idx, + ae_int_t subsetsize, + double& e, + real_1d_array& grad, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Gradient calculation (natural error function is used) + +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + X - input vector, length of array must be at least NIn + DesiredY- desired outputs, length of array must be at least NOut + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. + +OUTPUT PARAMETERS: + E - error function, sum-of-squares for regression networks, + cross-entropy for classification networks. + Grad - gradient of E with respect to weights of network, array[WCount] + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpgradn( + multilayerperceptron network, + real_1d_array x, + real_1d_array desiredy, + double& e, + real_1d_array& grad, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Batch gradient calculation for a set of inputs/outputs +(natural error function is used) + +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - set of inputs/outputs; one sample = one row; + first NIn columns contain inputs, + next NOut columns - desired outputs. + SSize - number of elements in XY + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. + +OUTPUT PARAMETERS: + E - error function, sum-of-squares for regression networks, + cross-entropy for classification networks. + Grad - gradient of E with respect to weights of network, array[WCount] + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpgradnbatch( + multilayerperceptron network, + real_2d_array xy, + ae_int_t ssize, + double& e, + real_1d_array& grad, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Batch Hessian calculation using R-algorithm. +Internal subroutine. + + -- ALGLIB -- + Copyright 26.01.2008 by Bochkanov Sergey. + + Hessian calculation based on R-algorithm described in + "Fast Exact Multiplication by the Hessian", + B. A. Pearlmutter, + Neural Computation, 1994. +*************************************************************************/ +
    void alglib::mlphessianbatch( + multilayerperceptron network, + real_2d_array xy, + ae_int_t ssize, + double& e, + real_1d_array& grad, + real_2d_array& h, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Batch Hessian calculation (natural error function) using R-algorithm. +Internal subroutine. + + -- ALGLIB -- + Copyright 26.01.2008 by Bochkanov Sergey. + + Hessian calculation based on R-algorithm described in + "Fast Exact Multiplication by the Hessian", + B. A. Pearlmutter, + Neural Computation, 1994. +*************************************************************************/ +
    void alglib::mlphessiannbatch( + multilayerperceptron network, + real_2d_array xy, + ae_int_t ssize, + double& e, + real_1d_array& grad, + real_2d_array& h, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Internal subroutine. + + -- ALGLIB -- + Copyright 30.03.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpinitpreprocessor( + multilayerperceptron network, + real_2d_array xy, + ae_int_t ssize, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Tells whether network is SOFTMAX-normalized (i.e. classifier) or not. + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    bool alglib::mlpissoftmax( + multilayerperceptron network, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Processing + +INPUT PARAMETERS: + Network - neural network + X - input vector, array[0..NIn-1]. + +OUTPUT PARAMETERS: + Y - result. Regression estimate when solving regression task, + vector of posterior probabilities for classification task. + +See also MLPProcessI + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpprocess( + multilayerperceptron network, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault); + +
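A forward-pass sketch (not from the upstream manual; dataanalysis.h assumed, input values made up). The output array is sized to NOut explicitly here rather than relying on any automatic resizing:

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 5, 1, net);

        real_1d_array x = "[0.5, -0.5]";
        real_1d_array y;
        y.setlength(1);              // buffer for the single network output
        mlpprocess(net, x, y);
        printf("y=%.6f\n", y[0]);
        return 0;
    }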
    + +
    +
    /************************************************************************* +'interactive' variant of MLPProcess for languages like Python which +support constructs like "Y = MLPProcess(NN,X)" and interactive mode of the +interpreter + +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. + + -- ALGLIB -- + Copyright 21.09.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpprocessi( + multilayerperceptron network, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Returns information about initialized network: number of inputs, outputs, +weights. + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpproperties( + multilayerperceptron network, + ae_int_t& nin, + ae_int_t& nout, + ae_int_t& wcount, + const xparams _params = alglib::xdefault); + +
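A sketch of querying the basic dimensions in one call (not from the upstream manual; dataanalysis.h assumed, layer sizes arbitrary):

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(3, 7, 2, net);
        ae_int_t nin, nout, wcount;
        mlpproperties(net, nin, nout, wcount);
        printf("nin=%d nout=%d wcount=%d\n", int(nin), int(nout), int(wcount));
        return 0;
    }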
    + +
    +
    /************************************************************************* +Randomization of neural network weights + + -- ALGLIB -- + Copyright 06.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlprandomize( + multilayerperceptron network, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Randomization of neural network weights and of the standardizer +(input/output scaling coefficients) + + -- ALGLIB -- + Copyright 10.03.2008 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlprandomizefull( + multilayerperceptron network, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Relative classification error on the test set. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. + +RESULT: +Percent of incorrectly classified cases. Works both for classifier +networks and general purpose networks used as classifiers. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs, the following +dataset format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses classes, the +following dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 25.12.2008 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlprelclserror( + multilayerperceptron network, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Relative classification error on the test set given by sparse matrix. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format. Sparse matrix must use CRS format + for storage. + NPoints - points count, >=0. + +RESULT: +Percent of incorrectly classified cases. Works both for classifier +networks and general purpose networks used as classifiers. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs, the following +dataset format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses classes, the +following dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 09.08.2012 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlprelclserrorsparse( + multilayerperceptron network, + sparsematrix xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +RMS error on the given test set. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. + +RESULT: +Root mean square error. Its meaning for regression task is obvious. As for +classification task, RMS error means error when estimating posterior +probabilities. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs, the following +dataset format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses classes, the +following dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). + + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlprmserror( + multilayerperceptron network, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +RMS error on the test set given by sparse matrix. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format. This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. + NPoints - points count, >=0. + +RESULT: +Root mean square error. Its meaning for regression task is obvious. As for +classification task, RMS error means error when estimating posterior +probabilities. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs, the following +dataset format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses classes, the +following dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -Completion codes: -* -5 inappropriate solver was used: - * QuickQP solver for problem with general linear constraints - * Cholesky solver for semidefinite or indefinite problems - * Cholesky solver for problems with non-boundary constraints -* -4 BLEIC-QP or QuickQP solver found unconstrained direction - of negative curvature (function is unbounded from - below even under constraints), no meaningful - minimum can be found. -* -3 inconsistent constraints (or, maybe, feasible point is - too hard to find). If you are sure that constraints are feasible, - try to restart optimizer with better initial approximation. -* -1 solver error -* 1..4 successful completion -* 5 MaxIts steps was taken -* 7 stopping conditions are too stringent, - further improvement is impossible, - X contains best point found so far. + -- ALGLIB -- + Copyright 09.08.2012 by Bochkanov Sergey *************************************************************************/ -
    class minqpreport -{ - ae_int_t inneriterationscount; - ae_int_t outeriterationscount; - ae_int_t nmv; - ae_int_t ncholesky; - ae_int_t terminationtype; -}; +
    double alglib::mlprmserrorsparse( + multilayerperceptron network, + sparsematrix xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This object stores nonlinear optimizer state. -You should use functions provided by MinQP subpackage to work with this -object -*************************************************************************/ -
    class minqpstate -{ -}; +This function serializes data structure to string. +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. + But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. +*************************************************************************/ +
    void mlpserialize(multilayerperceptron &obj, std::string &s_out); +void mlpserialize(multilayerperceptron &obj, std::ostream &s_out);
    - +
     
    /************************************************************************* - CONSTRAINED QUADRATIC PROGRAMMING - -The subroutine creates QP optimizer. After initial creation, it contains -default optimization problem with zero quadratic and linear terms and no -constraints. You should set quadratic/linear terms with calls to functions -provided by MinQP subpackage. - -You should also choose appropriate QP solver and set it and its stopping -criteria by means of MinQPSetAlgo??????() function. Then, you should start -solution process by means of MinQPOptimize() call. Solution itself can be -obtained with MinQPResults() function. +This function sets offset/scaling coefficients for I-th input of the +network. INPUT PARAMETERS: - N - problem size + Network - network + I - input index + Mean - mean term + Sigma - sigma term (if zero, will be replaced by 1.0) -OUTPUT PARAMETERS: - State - optimizer with zero quadratic/linear terms - and no constraints +NTE: I-th input is passed through linear transformation + IN[i] = (IN[i]-Mean)/Sigma +before feeding to the network. This function sets Mean and Sigma. -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpcreate(ae_int_t n, minqpstate& state); +
    void alglib::mlpsetinputscaling( + multilayerperceptron network, + ae_int_t i, + double mean, + double sigma, + const xparams _params = alglib::xdefault);
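A scaling sketch (not from the upstream manual; dataanalysis.h assumed, Mean/Sigma values made up). It sets the standardization of input 0 and reads it back with the getter documented earlier:

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 5, 1, net);

        // Standardize input 0 as (x-10.0)/2.5 before it reaches the network.
        mlpsetinputscaling(net, 0, 10.0, 2.5);

        double mean, sigma;
        mlpgetinputscaling(net, 0, mean, sigma);
        printf("input 0: mean=%.2f sigma=%.2f\n", mean, sigma);
        return 0;
    }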
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +
     
    /************************************************************************* -This function solves quadratic programming problem. - -Prior to calling this function you should choose solver by means of one of -the following functions: - -* MinQPSetAlgoQuickQP() - for QuickQP solver -* MinQPSetAlgoBLEIC() - for BLEIC-QP solver - -These functions also allow you to control stopping criteria of the solver. -If you did not set solver, MinQP subpackage will automatically select -solver for your problem and will run it with default stopping criteria. - -However, it is better to set explicitly solver and its stopping criteria. +This function modifies information about Ith neuron of Kth layer INPUT PARAMETERS: - State - algorithm state + Network - network + K - layer index + I - neuron index (within layer) + FKind - activation function type (used by MLPActivationFunction()) + this value must be zero for input neurons + (you can not set activation function for input neurons) + Threshold - also called offset, bias + this value must be zero for input neurons + (you can not set threshold for input neurons) -You should use MinQPResults() function to access results after calls -to this function. +NOTES: +1. this function throws exception if layer or neuron with given index do + not exists. +2. this function also throws exception when you try to set non-linear + activation function for input neurons (any kind of network) or for output + neurons of classifier network. +3. this function throws exception when you try to set non-zero threshold for + input neurons (any kind of network). -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey. - Special thanks to Elvira Illarionova for important suggestions on - the linearly constrained QP algorithm. + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpoptimize(minqpstate state); +
    void alglib::mlpsetneuroninfo( + multilayerperceptron network, + ae_int_t k, + ae_int_t i, + ae_int_t fkind, + double threshold, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +
     
    /************************************************************************* -QP solver results +This function sets offset/scaling coefficients for I-th output of the +network. INPUT PARAMETERS: - State - algorithm state + Network - network + I - input index + Mean - mean term + Sigma - sigma term (if zero, will be replaced by 1.0) OUTPUT PARAMETERS: - X - array[0..N-1], solution. - This array is allocated and initialized only when - Rep.TerminationType parameter is positive (success). - Rep - optimization report. You should check Rep.TerminationType, - which contains completion code, and you may check another - fields which contain another information about algorithm - functioning. - Failure codes returned by algorithm are: - * -5 inappropriate solver was used: - * Cholesky solver for (semi)indefinite problems - * Cholesky solver for problems with sparse matrix - * QuickQP solver for problem with general linear - constraints - * -4 BLEIC-QP/QuickQP solver found unconstrained - direction of negative curvature (function is - unbounded from below even under constraints), no - meaningful minimum can be found. - * -3 inconsistent constraints (or maybe feasible point - is too hard to find). If you are sure that - constraints are feasible, try to restart optimizer - with better initial approximation. - - Completion codes specific for Cholesky algorithm: - * 4 successful completion - - Completion codes specific for BLEIC/QuickQP algorithms: - * 1 relative function improvement is no more than EpsF. - * 2 scaled step is no more than EpsX. - * 4 scaled gradient norm is no more than EpsG. - * 5 MaxIts steps was taken +NOTE: I-th output is passed through linear transformation + OUT[i] = OUT[i]*Sigma+Mean +before returning it to user. This function sets Sigma/Mean. In case we +have SOFTMAX-normalized network, you can not set (Sigma,Mean) to anything +other than(0.0,1.0) - this function will throw exception. -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpresults( - minqpstate state, - real_1d_array& x, - minqpreport& rep); +
    void alglib::mlpsetoutputscaling( + multilayerperceptron network, + ae_int_t i, + double mean, + double sigma, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +
     
    /************************************************************************* -QP results +This function modifies information about connection from I0-th neuron of +K0-th layer to I1-th neuron of K1-th layer. -Buffered implementation of MinQPResults() which uses pre-allocated buffer -to store X[]. If buffer size is too small, it resizes buffer. It is -intended to be used in the inner cycles of performance critical algorithms -where array reallocation penalty is too large to be ignored. +INPUT PARAMETERS: + Network - network + K0 - layer index + I0 - neuron index (within layer) + K1 - layer index + I1 - neuron index (within layer) + W - connection weight (must be zero for non-existent + connections) + +This function: +1. throws exception if layer or neuron with given index do not exists. +2. throws exception if you try to set non-zero weight for non-existent + connection -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpresultsbuf( - minqpstate state, - real_1d_array& x, - minqpreport& rep); +
    void alglib::mlpsetweight( + multilayerperceptron network, + ae_int_t k0, + ae_int_t i0, + ae_int_t k1, + ae_int_t i1, + double w, + const xparams _params = alglib::xdefault);
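A weight set/get sketch (not from the upstream manual; dataanalysis.h assumed, indices and weight value arbitrary). Layer 0 is the input layer and layer 1 the hidden layer, so the connection below exists in a fully connected MLPCreate1 network:

    #include <cstdio>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 3, 1, net);

        // Connection from neuron 0 of layer 0 (input) to neuron 0 of layer 1 (hidden).
        mlpsetweight(net, 0, 0, 1, 0, 0.25);
        printf("w(0,0 -> 1,0) = %.2f\n", mlpgetweight(net, 0, 0, 1, 0));
        return 0;
    }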
    - +
     
    /************************************************************************* -This function tells solver to use BLEIC-based algorithm and sets stopping -criteria for the algorithm. - -ALGORITHM FEATURES: - -* supports dense and sparse QP problems -* supports boundary and general linear equality/inequality constraints -* can solve all types of problems (convex, semidefinite, nonconvex) as - long as they are bounded from below under constraints. - Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1". - Of course, global minimum is found only for positive definite and - semidefinite problems. As for indefinite ones - only local minimum is - found. - -ALGORITHM OUTLINE: - -* BLEIC-QP solver is just a driver function for MinBLEIC solver; it solves - quadratic programming problem as general linearly constrained - optimization problem, which is solved by means of BLEIC solver (part of - ALGLIB, active set method). - -ALGORITHM LIMITATIONS: +This function unserializes data structure from string. +*************************************************************************/ +
    void mlpunserialize(const std::string &s_in, multilayerperceptron &obj); +void mlpunserialize(const std::istream &s_in, multilayerperceptron &obj); +
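A serialization round-trip sketch (not from the upstream manual; dataanalysis.h assumed). The string produced by mlpserialize is portable, so the restored network evaluates identically to the original:

    #include <cstdio>
    #include <string>
    #include "dataanalysis.h"
    using namespace alglib;

    int main()
    {
        multilayerperceptron net, restored;
        mlpcreate1(2, 5, 1, net);
        mlprandomize(net);

        std::string s;
        mlpserialize(net, s);           // portable text representation
        mlpunserialize(s, restored);    // reconstruct an equivalent network

        real_1d_array x = "[0.3, 0.7]", y1, y2;
        mlpprocessi(net, x, y1);
        mlpprocessi(restored, x, y2);
        printf("%.12f vs %.12f\n", y1[0], y2[0]);
        return 0;
    }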
    + + + +
    +
    /************************************************************************* +Neural networks ensemble +*************************************************************************/ +
    class mlpensemble +{ +}; -* unlike QuickQP solver, this algorithm does not perform Newton steps and - does not use Level 3 BLAS. Being general-purpose active set method, it - can activate constraints only one-by-one. Thus, its performance is lower - than that of QuickQP. -* its precision is also a bit inferior to that of QuickQP. BLEIC-QP - performs only LBFGS steps (no Newton steps), which are good at detecting - neighborhood of the solution, buy need many iterations to find solution - with more than 6 digits of precision. +
    + +
    +
    /************************************************************************* +Average cross-entropy (in bits per element) on the test set INPUT PARAMETERS: - State - structure which stores algorithm state - EpsG - >=0 - The subroutine finishes its work if the condition - |v|<EpsG is satisfied, where: - * |.| means Euclidian norm - * v - scaled constrained gradient vector, v[i]=g[i]*s[i] - * g - gradient - * s - scaling coefficients set by MinQPSetScale() - EpsF - >=0 - The subroutine finishes its work if exploratory steepest - descent step on k+1-th iteration satisfies following - condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} - EpsX - >=0 - The subroutine finishes its work if exploratory steepest - descent step on k+1-th iteration satisfies following - condition: - * |.| means Euclidian norm - * v - scaled step vector, v[i]=dx[i]/s[i] - * dx - step vector, dx=X(k+1)-X(k) - * s - scaling coefficients set by MinQPSetScale() - MaxIts - maximum number of iterations. If MaxIts=0, the number of - iterations is unlimited. NOTE: this algorithm uses LBFGS - iterations, which are relatively cheap, but improve - function value only a bit. So you will need many iterations - to converge - from 0.1*N to 10*N, depending on problem's - condition number. - -IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM -BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT! + Ensemble- ensemble + XY - test set + NPoints - test set size -Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead -to automatic stopping criterion selection (presently it is small step -length, but it may change in the future versions of ALGLIB). +RESULT: + CrossEntropy/(NPoints*LN(2)). + Zero if ensemble solves regression task. -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetalgobleic( - minqpstate state, - double epsg, - double epsf, - double epsx, - ae_int_t maxits); +
    double alglib::mlpeavgce( + mlpensemble ensemble, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function tells solver to use Cholesky-based algorithm. This algorithm -was deprecated in ALGLIB 3.9.0 because its performance is inferior to that -of BLEIC-QP or QuickQP on high-dimensional problems. Furthermore, it -supports only dense convex QP problems. - -This solver is no longer active by default. - -We recommend you to switch to BLEIC-QP or QuickQP solver. +Average error on the test set INPUT PARAMETERS: - State - structure which stores algorithm state + Ensemble- ensemble + XY - test set + NPoints - test set size + +RESULT: + Its meaning for regression task is obvious. As for classification task +it means average error when estimating posterior probabilities. -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetalgocholesky(minqpstate state); +
    double alglib::mlpeavgerror( + mlpensemble ensemble, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function tells solver to use QuickQP algorithm: special extra-fast -algorithm for problems with boundary-only constrants. It may solve -non-convex problems as long as they are bounded from below under -constraints. +Average relative error on the test set -ALGORITHM FEATURES: -* many times (from 5x to 50x!) faster than BLEIC-based QP solver; utilizes - accelerated methods for activation of constraints. -* supports dense and sparse QP problems -* supports ONLY boundary constraints; general linear constraints are NOT - supported by this solver -* can solve all types of problems (convex, semidefinite, nonconvex) as - long as they are bounded from below under constraints. - Say, it is possible to solve "min{-x^2} subject to -1<=x<=+1". - In convex/semidefinite case global minimum is returned, in nonconvex - case - algorithm returns one of the local minimums. +INPUT PARAMETERS: + Ensemble- ensemble + XY - test set + NPoints - test set size -ALGORITHM OUTLINE: +RESULT: + Its meaning for regression task is obvious. As for classification task +it means average relative error when estimating posterior probabilities. -* algorithm performs two kinds of iterations: constrained CG iterations - and constrained Newton iterations -* initially it performs small number of constrained CG iterations, which - can efficiently activate/deactivate multiple constraints -* after CG phase algorithm tries to calculate Cholesky decomposition and - to perform several constrained Newton steps. If Cholesky decomposition - failed (matrix is indefinite even under constraints), we perform more - CG iterations until we converge to such set of constraints that system - matrix becomes positive definite. Constrained Newton steps greatly - increase convergence speed and precision. -* algorithm interleaves CG and Newton iterations which allows to handle - indefinite matrices (CG phase) and quickly converge after final set of - constraints is found (Newton phase). Combination of CG and Newton phases - is called "outer iteration". -* it is possible to turn off Newton phase (beneficial for semidefinite - problems - Cholesky decomposition will fail too often) + -- ALGLIB -- + Copyright 17.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlpeavgrelerror( + mlpensemble ensemble, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); -ALGORITHM LIMITATIONS: +
    + +
    +
    /************************************************************************* +Like MLPCreate0, but for ensembles. -* algorithm does not support general linear constraints; only boundary - ones are supported -* Cholesky decomposition for sparse problems is performed with Skyline - Cholesky solver, which is intended for low-profile matrices. No profile- - reducing reordering of variables is performed in this version of ALGLIB. -* problems with near-zero negative eigenvalues (or exacty zero ones) may - experience about 2-3x performance penalty. The reason is that Cholesky - decomposition can not be performed until we identify directions of zero - and negative curvature and activate corresponding boundary constraints - - but we need a lot of trial and errors because these directions are hard - to notice in the matrix spectrum. - In this case you may turn off Newton phase of algorithm. - Large negative eigenvalues are not an issue, so highly non-convex - problems can be solved very efficiently. + -- ALGLIB -- + Copyright 18.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpecreate0( + ae_int_t nin, + ae_int_t nout, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS: - State - structure which stores algorithm state - EpsG - >=0 - The subroutine finishes its work if the condition - |v|<EpsG is satisfied, where: - * |.| means Euclidian norm - * v - scaled constrained gradient vector, v[i]=g[i]*s[i] - * g - gradient - * s - scaling coefficients set by MinQPSetScale() - EpsF - >=0 - The subroutine finishes its work if exploratory steepest - descent step on k+1-th iteration satisfies following - condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} - EpsX - >=0 - The subroutine finishes its work if exploratory steepest - descent step on k+1-th iteration satisfies following - condition: - * |.| means Euclidian norm - * v - scaled step vector, v[i]=dx[i]/s[i] - * dx - step vector, dx=X(k+1)-X(k) - * s - scaling coefficients set by MinQPSetScale() - MaxOuterIts-maximum number of OUTER iterations. One outer iteration - includes some amount of CG iterations (from 5 to ~N) and - one or several (usually small amount) Newton steps. Thus, - one outer iteration has high cost, but can greatly reduce - funcation value. - UseNewton- use Newton phase or not: - * Newton phase improves performance of positive definite - dense problems (about 2 times improvement can be observed) - * can result in some performance penalty on semidefinite - or slightly negative definite problems - each Newton - phase will bring no improvement (Cholesky failure), but - still will require computational time. - * if you doubt, you can turn off this phase - optimizer - will retain its most of its high speed. +
    + +
    +
    /************************************************************************* +Like MLPCreate1, but for ensembles. -IT IS VERY IMPORTANT TO CALL MinQPSetScale() WHEN YOU USE THIS ALGORITHM -BECAUSE ITS STOPPING CRITERIA ARE SCALE-DEPENDENT! + -- ALGLIB -- + Copyright 18.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpecreate1( + ae_int_t nin, + ae_int_t nhid, + ae_int_t nout, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault); -Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead -to automatic stopping criterion selection (presently it is small step -length, but it may change in the future versions of ALGLIB). +
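A minimal usage sketch for the ensemble constructors above (this example is not part of the upstream manual; it assumes the C++ interface with the dataanalysis.h header, where the ensemble unit normally lives, and uses mlpeproperties()/mlpeissoftmax() documented later in this section):

    #include "dataanalysis.h"
    #include <stdio.h>

    using namespace alglib;

    int main()
    {
        // 5-member ensemble of networks with 2 inputs, 10 hidden neurons, 1 linear output
        mlpensemble ensemble;
        mlpecreate1(2, 10, 1, 5, ensemble);

        // query basic properties of the freshly created (untrained) ensemble
        ae_int_t nin, nout;
        mlpeproperties(ensemble, nin, nout);
        printf("inputs=%d outputs=%d softmax=%d\n",
               int(nin), int(nout), int(mlpeissoftmax(ensemble)));
        return 0;
    }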
    + +
    +
    /************************************************************************* +Like MLPCreate2, but for ensembles. -- ALGLIB -- - Copyright 22.05.2014 by Bochkanov Sergey + Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetalgoquickqp( - minqpstate state, - double epsg, - double epsf, - double epsx, - ae_int_t maxouterits, - bool usenewton); +
    void alglib::mlpecreate2( + ae_int_t nin, + ae_int_t nhid1, + ae_int_t nhid2, + ae_int_t nout, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets boundary constraints for QP solver - -Boundary constraints are inactive by default (after initial creation). -After being set, they are preserved until explicitly turned off with -another SetBC() call. +Like MLPCreateB0, but for ensembles. -INPUT PARAMETERS: - State - structure stores algorithm state - BndL - lower bounds, array[N]. - If some (all) variables are unbounded, you may specify - very small number or -INF (latter is recommended because - it will allow solver to use better algorithm). - BndU - upper bounds, array[N]. - If some (all) variables are unbounded, you may specify - very large number or +INF (latter is recommended because - it will allow solver to use better algorithm). + -- ALGLIB -- + Copyright 18.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpecreateb0( + ae_int_t nin, + ae_int_t nout, + double b, + double d, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault); -NOTE: it is possible to specify BndL[i]=BndU[i]. In this case I-th -variable will be "frozen" at X[i]=BndL[i]=BndU[i]. +
    + +
    +
    /************************************************************************* +Like MLPCreateB1, but for ensembles. -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetbc( - minqpstate state, - real_1d_array bndl, - real_1d_array bndu); +
    void alglib::mlpecreateb1( + ae_int_t nin, + ae_int_t nhid, + ae_int_t nout, + double b, + double d, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function sets linear constraints for QP optimizer. - -Linear constraints are inactive by default (after initial creation). - -INPUT PARAMETERS: - State - structure previously allocated with MinQPCreate call. - C - linear constraints, array[K,N+1]. - Each row of C represents one constraint, either equality - or inequality (see below): - * first N elements correspond to coefficients, - * last element corresponds to the right part. - All elements of C (including right part) must be finite. - CT - type of constraints, array[K]: - * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] - * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] - * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] - K - number of equality/inequality constraints, K>=0: - * if given, only leading K elements of C/CT are used - * if not given, automatically determined from sizes of C/CT - -NOTE 1: linear (non-bound) constraints are satisfied only approximately - - there always exists some minor violation (about 10^-10...10^-13) - due to numerical errors. +Like MLPCreateB2, but for ensembles. -- ALGLIB -- - Copyright 19.06.2012 by Bochkanov Sergey + Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetlc( - minqpstate state, - real_2d_array c, - integer_1d_array ct); -void alglib::minqpsetlc( - minqpstate state, - real_2d_array c, - integer_1d_array ct, - ae_int_t k); +
    void alglib::mlpecreateb2( + ae_int_t nin, + ae_int_t nhid1, + ae_int_t nhid2, + ae_int_t nout, + double b, + double d, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function sets linear term for QP solver. - -By default, linear term is zero. - -INPUT PARAMETERS: - State - structure which stores algorithm state - B - linear term, array[N]. +Like MLPCreateC0, but for ensembles. -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetlinearterm(minqpstate state, real_1d_array b); +
    void alglib::mlpecreatec0( + ae_int_t nin, + ae_int_t nout, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +
     
    /************************************************************************* -This function sets origin for QP solver. By default, following QP program -is solved: - - min(0.5*x'*A*x+b'*x) - -This function allows to solve different problem: - - min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) - -INPUT PARAMETERS: - State - structure which stores algorithm state - XOrigin - origin, array[N]. +Like MLPCreateC1, but for ensembles. -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetorigin(minqpstate state, real_1d_array xorigin); +
    void alglib::mlpecreatec1( + ae_int_t nin, + ae_int_t nhid, + ae_int_t nout, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets dense quadratic term for QP solver. By default, -quadratic term is zero. - -SUPPORT BY ALGLIB QP ALGORITHMS: - -Dense quadratic term can be handled by any of the QP algorithms supported -by ALGLIB QP Solver. - -IMPORTANT: - -This solver minimizes following function: - f(x) = 0.5*x'*A*x + b'*x. -Note that quadratic term has 0.5 before it. So if you want to minimize - f(x) = x^2 + x -you should rewrite your problem as follows: - f(x) = 0.5*(2*x^2) + x -and your matrix A will be equal to [[2.0]], not to [[1.0]] - -INPUT PARAMETERS: - State - structure which stores algorithm state - A - matrix, array[N,N] - IsUpper - (optional) storage type: - * if True, symmetric matrix A is given by its upper - triangle, and the lower triangle isn’t used - * if False, symmetric matrix A is given by its lower - triangle, and the upper triangle isn’t used - * if not given, both lower and upper triangles must be - filled. +Like MLPCreateC2, but for ensembles. -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetquadraticterm(minqpstate state, real_2d_array a); -void alglib::minqpsetquadraticterm( - minqpstate state, - real_2d_array a, - bool isupper); +
    void alglib::mlpecreatec2( + ae_int_t nin, + ae_int_t nhid1, + ae_int_t nhid2, + ae_int_t nout, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +
     
    /************************************************************************* -This function sets sparse quadratic term for QP solver. By default, -quadratic term is zero. - -IMPORTANT: - -This solver minimizes following function: - f(x) = 0.5*x'*A*x + b'*x. -Note that quadratic term has 0.5 before it. So if you want to minimize - f(x) = x^2 + x -you should rewrite your problem as follows: - f(x) = 0.5*(2*x^2) + x -and your matrix A will be equal to [[2.0]], not to [[1.0]] - -INPUT PARAMETERS: - State - structure which stores algorithm state - A - matrix, array[N,N] - IsUpper - (optional) storage type: - * if True, symmetric matrix A is given by its upper - triangle, and the lower triangle isn’t used - * if False, symmetric matrix A is given by its lower - triangle, and the upper triangle isn’t used - * if not given, both lower and upper triangles must be - filled. +Creates ensemble from network. Only network geometry is copied. -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetquadratictermsparse( - minqpstate state, - sparsematrix a, - bool isupper); +
    void alglib::mlpecreatefromnetwork( + multilayerperceptron network, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function sets scaling coefficients. - -ALGLIB optimizers use scaling matrices to test stopping conditions (step -size and gradient are scaled before comparison with tolerances). Scale of -the I-th variable is a translation invariant measure of: -a) "how large" the variable is -b) how large the step should be to make significant changes in the function - -BLEIC-based QP solver uses scale for two purposes: -* to evaluate stopping conditions -* for preconditioning of the underlying BLEIC solver - -INPUT PARAMETERS: - State - structure stores algorithm state - S - array[N], non-zero scaling coefficients - S[i] may be negative, sign doesn't matter. +Like MLPCreateR0, but for ensembles. -- ALGLIB -- - Copyright 14.01.2011 by Bochkanov Sergey + Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetscale(minqpstate state, real_1d_array s); +
    void alglib::mlpecreater0( + ae_int_t nin, + ae_int_t nout, + double a, + double b, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets starting point for QP solver. It is useful to have -good initial approximation to the solution, because it will increase -speed of convergence and identification of active constraints. - -INPUT PARAMETERS: - State - structure which stores algorithm state - X - starting point, array[N]. +Like MLPCreateR1, but for ensembles. -- ALGLIB -- - Copyright 11.01.2011 by Bochkanov Sergey + Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::minqpsetstartingpoint(minqpstate state, real_1d_array x); +
    void alglib::mlpecreater1( + ae_int_t nin, + ae_int_t nhid, + ae_int_t nout, + double a, + double b, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates minimization of F(x0,x1) = x0^2 + x1^2 -6*x0 - 4*x1
    -    // subject to bound constraints 0<=x0<=2.5, 0<=x1<=2.5
    -    //
    -    // Exact solution is [x0,x1] = [2.5,2]
    -    //
    -    // We provide algorithm with starting point. With such small problem good starting
    -    // point is not really necessary, but with high-dimensional problem it can save us
    -    // a lot of time.
    -    //
    -    // IMPORTANT: this solver minimizes  following  function:
    -    //     f(x) = 0.5*x'*A*x + b'*x.
    -    // Note that quadratic term has 0.5 before it. So if you want to minimize
    -    // quadratic function, you should rewrite it in such way that quadratic term
    -    // is multiplied by 0.5 too.
    -    // For example, our function is f(x)=x0^2+x1^2+..., but we rewrite it as 
    -    //     f(x) = 0.5*(2*x0^2+2*x1^2) + ....
    -    // and pass diag(2,2) as quadratic term - NOT diag(1,1)!
    -    //
    -    real_2d_array a = "[[2,0],[0,2]]";
    -    real_1d_array b = "[-6,-4]";
    -    real_1d_array x0 = "[0,1]";
    -    real_1d_array s = "[1,1]";
    -    real_1d_array bndl = "[0.0,0.0]";
    -    real_1d_array bndu = "[2.5,2.5]";
    -    real_1d_array x;
    -    minqpstate state;
    -    minqpreport rep;
    -
    -    // create solver, set quadratic/linear terms
    -    minqpcreate(2, state);
    -    minqpsetquadraticterm(state, a);
    -    minqpsetlinearterm(state, b);
    -    minqpsetstartingpoint(state, x0);
    -    minqpsetbc(state, bndl, bndu);
    -
    -    // Set scale of the parameters.
    -    // It is strongly recommended that you set scale of your variables.
    -    // Knowing their scales is essential for evaluation of stopping criteria
    -    // and for preconditioning of the algorithm steps.
    -    // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php
    -    minqpsetscale(state, s);
    -
    -    // solve problem with QuickQP solver, default stopping criteria are used
    -    minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true);
    -    minqpoptimize(state);
    -    minqpresults(state, x, rep);
    -    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [2.5,2]
    -
    -    // solve problem with BLEIC-based QP solver
    -    // default stopping criteria are used.
    -    minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);
    -    minqpoptimize(state);
    -    minqpresults(state, x, rep);
    -    printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [2.5,2]
    -    return 0;
    -}
    +
    /************************************************************************* +Like MLPCreateR2, but for ensembles. + -- ALGLIB -- + Copyright 18.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpecreater2( + ae_int_t nin, + ae_int_t nhid1, + ae_int_t nhid2, + ae_int_t nout, + double a, + double b, + ae_int_t ensemblesize, + mlpensemble& ensemble, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates minimization of F(x0,x1) = x0^2 + x1^2 -6*x0 - 4*x1
    -    // subject to linear constraint x0+x1<=2
    -    //
    -    // Exact solution is [x0,x1] = [1.5,0.5]
    -    //
    -    // IMPORTANT: this solver minimizes  following  function:
    -    //     f(x) = 0.5*x'*A*x + b'*x.
    -    // Note that quadratic term has 0.5 before it. So if you want to minimize
    -    // quadratic function, you should rewrite it in such way that quadratic term
    -    // is multiplied by 0.5 too.
    -    // For example, our function is f(x)=x0^2+x1^2+..., but we rewrite it as 
    -    //     f(x) = 0.5*(2*x0^2+2*x1^2) + ....
    -    // and pass diag(2,2) as quadratic term - NOT diag(1,1)!
    -    //
    -    real_2d_array a = "[[2,0],[0,2]]";
    -    real_1d_array b = "[-6,-4]";
    -    real_1d_array s = "[1,1]";
    -    real_2d_array c = "[[1.0,1.0,2.0]]";
    -    integer_1d_array ct = "[-1]";
    -    real_1d_array x;
    -    minqpstate state;
    -    minqpreport rep;
    -
    -    // create solver, set quadratic/linear terms
    -    minqpcreate(2, state);
    -    minqpsetquadraticterm(state, a);
    -    minqpsetlinearterm(state, b);
    -    minqpsetlc(state, c, ct);
    -
    -    // Set scale of the parameters.
    -    // It is strongly recommended that you set scale of your variables.
    -    // Knowing their scales is essential for evaluation of stopping criteria
    -    // and for preconditioning of the algorithm steps.
    -    // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php
    -    minqpsetscale(state, s);
    -
    -    // solve problem with BLEIC-based QP solver
    -    // default stopping criteria are used.
    -    minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);
    -    minqpoptimize(state);
    -    minqpresults(state, x, rep);
    -    printf("%s\n", x.tostring(1).c_str()); // EXPECTED: [1.500,0.500]
    -
    -    // solve problem with QuickQP solver, default stopping criteria are used
    -    // Oops! It does not support general linear constraints, -5 returned as completion code!
    -    minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true);
    -    minqpoptimize(state);
    -    minqpresults(state, x, rep);
    -    printf("%d\n", int(rep.terminationtype)); // EXPECTED: -5
    -    return 0;
    -}
    +
    /************************************************************************* +Return normalization type (whether ensemble is SOFTMAX-normalized or not). + -- ALGLIB -- + Copyright 17.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    bool alglib::mlpeissoftmax( + mlpensemble ensemble, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates minimization of nonconvex function
    -    //     F(x0,x1) = -(x0^2+x1^2)
    -    // subject to constraints x0,x1 in [1.0,2.0]
    -    // Exact solution is [x0,x1] = [2,2].
    -    //
    -    // IMPORTANT: this solver minimizes  following  function:
    -    //     f(x) = 0.5*x'*A*x + b'*x.
    -    // Note that quadratic term has 0.5 before it. So if you want to minimize
    -    // quadratic function, you should rewrite it in such way that quadratic term
    -    // is multiplied by 0.5 too.
    -    //
    -    // For example, our function is f(x)=-(x0^2+x1^2), but we rewrite it as 
    -    //     f(x) = 0.5*(-2*x0^2-2*x1^2)
    -    // and pass diag(-2,-2) as quadratic term - NOT diag(-1,-1)!
    -    //
    -    real_2d_array a = "[[-2,0],[0,-2]]";
    -    real_1d_array x0 = "[1,1]";
    -    real_1d_array s = "[1,1]";
    -    real_1d_array bndl = "[1.0,1.0]";
    -    real_1d_array bndu = "[2.0,2.0]";
    -    real_1d_array x;
    -    minqpstate state;
    -    minqpreport rep;
    -
    -    // create solver, set quadratic/linear terms, constraints
    -    minqpcreate(2, state);
    -    minqpsetquadraticterm(state, a);
    -    minqpsetstartingpoint(state, x0);
    -    minqpsetbc(state, bndl, bndu);
    +
    /************************************************************************* +Procesing - // Set scale of the parameters. - // It is strongly recommended that you set scale of your variables. - // Knowing their scales is essential for evaluation of stopping criteria - // and for preconditioning of the algorithm steps. - // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php - minqpsetscale(state, s); +INPUT PARAMETERS: + Ensemble- neural networks ensemble + X - input vector, array[0..NIn-1]. + Y - (possibly) preallocated buffer; if size of Y is less than + NOut, it will be reallocated. If it is large enough, it + is NOT reallocated, so we can save some time on reallocation. - // solve problem with BLEIC-QP solver. - // default stopping criteria are used. - minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0); - minqpoptimize(state); - minqpresults(state, x, rep); - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [2,2] - // Hmm... this problem is bounded from below (has solution) only under constraints. - // What it we remove them? - // - // You may see that algorithm detects unboundedness of the problem, - // -4 is returned as completion code. - real_1d_array nobndl = "[-inf,-inf]"; - real_1d_array nobndu = "[+inf,+inf]"; - minqpsetbc(state, nobndl, nobndu); - minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0); - minqpoptimize(state); - minqpresults(state, x, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: -4 - return 0; -} +OUTPUT PARAMETERS: + Y - result. Regression estimate when solving regression task, + vector of posterior probabilities for classification task. + -- ALGLIB -- + Copyright 17.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpeprocess( + mlpensemble ensemble, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault); -
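A short inference sketch for mlpeprocess() (illustrative only, not an upstream example; the ensemble below is untrained, so the output merely demonstrates the calling convention, and Y is preallocated in line with the note above about avoiding reallocation):

    #include "dataanalysis.h"
    #include <stdio.h>

    using namespace alglib;

    int main()
    {
        mlpensemble ensemble;
        mlpecreate1(2, 6, 1, 3, ensemble);   // 2 inputs, 1 output, untrained

        real_1d_array x = "[0.5,-1.0]";
        real_1d_array y;
        y.setlength(1);                      // already large enough, so mlpeprocess will not reallocate

        mlpeprocess(ensemble, x, y);         // averaged output of the ensemble members
        printf("%s\n", y.tostring(4).c_str());
        return 0;
    }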
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example demonstrates minimization of F(x0,x1) = x0^2 + x1^2 -6*x0 - 4*x1
    -    //
    -    // Exact solution is [x0,x1] = [3,2]
    -    //
    -    // We provide algorithm with starting point, although in this case
    -    // (dense matrix, no constraints) it can work without such information.
    -    //
    -    // IMPORTANT: this solver minimizes  following  function:
    -    //     f(x) = 0.5*x'*A*x + b'*x.
    -    // Note that quadratic term has 0.5 before it. So if you want to minimize
    -    // quadratic function, you should rewrite it in such way that quadratic term
    -    // is multiplied by 0.5 too.
    -    //
    -    // For example, our function is f(x)=x0^2+x1^2+..., but we rewrite it as 
    -    //     f(x) = 0.5*(2*x0^2+2*x1^2) + ....
    -    // and pass diag(2,2) as quadratic term - NOT diag(1,1)!
    -    //
    -    real_2d_array a = "[[2,0],[0,2]]";
    -    real_1d_array b = "[-6,-4]";
    -    real_1d_array x0 = "[0,1]";
    -    real_1d_array s = "[1,1]";
    -    real_1d_array x;
    -    minqpstate state;
    -    minqpreport rep;
    -
    -    // create solver, set quadratic/linear terms
    -    minqpcreate(2, state);
    -    minqpsetquadraticterm(state, a);
    -    minqpsetlinearterm(state, b);
    -    minqpsetstartingpoint(state, x0);
    +
    /************************************************************************* +'interactive' variant of MLPEProcess for languages like Python which +support constructs like "Y = MLPEProcess(LM,X)" and interactive mode of the +interpreter - // Set scale of the parameters. - // It is strongly recommended that you set scale of your variables. - // Knowing their scales is essential for evaluation of stopping criteria - // and for preconditioning of the algorithm steps. - // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php - minqpsetscale(state, s); +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. - // solve problem with QuickQP solver, default stopping criteria are used, Newton phase is active - minqpsetalgoquickqp(state, 0.0, 0.0, 0.0, 0, true); - minqpoptimize(state); - minqpresults(state, x, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 4 - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [3,2] + -- ALGLIB -- + Copyright 17.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpeprocessi( + mlpensemble ensemble, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault); - // solve problem with BLEIC-based QP solver. - // default stopping criteria are used. - minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0); - minqpoptimize(state); - minqpresults(state, x, rep); - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [3,2] - return 0; -} +
    + +
    +
    /************************************************************************* +Return ensemble properties (number of inputs and outputs). + -- ALGLIB -- + Copyright 17.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpeproperties( + mlpensemble ensemble, + ae_int_t& nin, + ae_int_t& nout, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "optimization.h"
    +
    /************************************************************************* +Randomization of MLP ensemble -using namespace alglib; + -- ALGLIB -- + Copyright 17.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlperandomize( + mlpensemble ensemble, + const xparams _params = alglib::xdefault); +
    + +
    +
    /************************************************************************* +Relative classification error on the test set -int main(int argc, char **argv) -{ - // - // This example demonstrates minimization of F(x0,x1) = x0^2 + x1^2 -6*x0 - 4*x1, - // with quadratic term given by sparse matrix structure. - // - // Exact solution is [x0,x1] = [3,2] - // - // We provide algorithm with starting point, although in this case - // (dense matrix, no constraints) it can work without such information. - // - // IMPORTANT: this solver minimizes following function: - // f(x) = 0.5*x'*A*x + b'*x. - // Note that quadratic term has 0.5 before it. So if you want to minimize - // quadratic function, you should rewrite it in such way that quadratic term - // is multiplied by 0.5 too. - // - // For example, our function is f(x)=x0^2+x1^2+..., but we rewrite it as - // f(x) = 0.5*(2*x0^2+2*x1^2) + .... - // and pass diag(2,2) as quadratic term - NOT diag(1,1)! - // - sparsematrix a; - real_1d_array b = "[-6,-4]"; - real_1d_array x0 = "[0,1]"; - real_1d_array s = "[1,1]"; - real_1d_array x; - minqpstate state; - minqpreport rep; +INPUT PARAMETERS: + Ensemble- ensemble + XY - test set + NPoints - test set size - // initialize sparsematrix structure - sparsecreate(2, 2, 0, a); - sparseset(a, 0, 0, 2.0); - sparseset(a, 1, 1, 2.0); +RESULT: + percent of incorrectly classified cases. + Works both for classifier betwork and for regression networks which +are used as classifiers. - // create solver, set quadratic/linear terms - minqpcreate(2, state); - minqpsetquadratictermsparse(state, a, true); - minqpsetlinearterm(state, b); - minqpsetstartingpoint(state, x0); + -- ALGLIB -- + Copyright 17.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlperelclserror( + mlpensemble ensemble, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); - // Set scale of the parameters. - // It is strongly recommended that you set scale of your variables. - // Knowing their scales is essential for evaluation of stopping criteria - // and for preconditioning of the algorithm steps. - // You can find more information on scaling at http://www.alglib.net/optimization/scaling.php - minqpsetscale(state, s); +
    + +
    +
    /************************************************************************* +RMS error on the test set - // solve problem with BLEIC-based QP solver. - // default stopping criteria are used. - minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0); - minqpoptimize(state); - minqpresults(state, x, rep); - printf("%s\n", x.tostring(2).c_str()); // EXPECTED: [3,2] +INPUT PARAMETERS: + Ensemble- ensemble + XY - test set + NPoints - test set size - // try to solve problem with Cholesky-based QP solver... - // Oops! It does not support sparse matrices, -5 returned as completion code! - minqpsetalgocholesky(state); - minqpoptimize(state); - minqpresults(state, x, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: -5 - return 0; -} +RESULT: + root mean square error. + Its meaning for regression task is obvious. As for classification task +RMS error means error when estimating posterior probabilities. + + -- ALGLIB -- + Copyright 17.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::mlpermserror( + mlpensemble ensemble, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault); +
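The error metrics above share one calling pattern; a small sketch (untrained ensemble, toy regression set with rows laid out as inputs followed by the target, as described for mlpeavgce()):

    #include "dataanalysis.h"
    #include <stdio.h>

    using namespace alglib;

    int main()
    {
        mlpensemble ensemble;
        mlpecreate1(2, 6, 1, 3, ensemble);    // 2 inputs, 1 output, untrained

        // regression test set: rows are [x0, x1, target]
        real_2d_array xy = "[[0,0,0],[1,0,1],[0,1,1],[1,1,2]]";

        printf("rms   = %.4f\n", mlpermserror(ensemble, xy, 4));
        printf("avg   = %.4f\n", mlpeavgerror(ensemble, xy, 4));
        printf("avgce = %.4f\n", mlpeavgce(ensemble, xy, 4));   // zero, since this ensemble solves a regression task
        return 0;
    }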
    + +
    +
    /************************************************************************* +This function serializes data structure to string. -
    +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. + But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. +*************************************************************************/ +
    void mlpeserialize(mlpensemble &obj, std::string &s_out); +void mlpeserialize(mlpensemble &obj, std::ostream &s_out); +
    + +
    +
    /************************************************************************* +This function unserializes data structure from string. +*************************************************************************/ +
    void mlpeunserialize(const std::string &s_in, mlpensemble &obj); +void mlpeunserialize(const std::istream &s_in, mlpensemble &obj); +
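A serialization round-trip sketch using the std::string overloads above (header and variable names are illustrative):

    #include "dataanalysis.h"
    #include <string>
    #include <stdio.h>

    using namespace alglib;

    int main()
    {
        mlpensemble ensemble, restored;
        mlpecreate0(2, 1, 4, ensemble);       // 4-member ensemble, 2 inputs, 1 output

        std::string s;
        mlpeserialize(ensemble, s);           // portable text representation
        mlpeunserialize(s, restored);         // rebuild an equivalent ensemble elsewhere

        ae_int_t nin, nout;
        mlpeproperties(restored, nin, nout);
        printf("restored: %d inputs, %d outputs\n", int(nin), int(nout));
        return 0;
    }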
    +
    -modelerrors
    -multilayerperceptron
    +mlpcvreport
    +mlpreport
    +mlptrainer
    -mlpactivationfunction
    -mlpallerrorssparsesubset
    -mlpallerrorssubset
    -mlpavgce
    -mlpavgcesparse
    -mlpavgerror
    -mlpavgerrorsparse
    -mlpavgrelerror
    -mlpavgrelerrorsparse
    -mlpclserror
    -mlpcopy
    -mlpcopytunableparameters
    -mlpcreate0
    -mlpcreate1
    -mlpcreate2
    -mlpcreateb0
    -mlpcreateb1
    -mlpcreateb2
    -mlpcreatec0
    -mlpcreatec1
    -mlpcreatec2
    -mlpcreater0
    -mlpcreater1
    -mlpcreater2
    -mlperror
    -mlperrorn
    -mlperrorsparse
    -mlperrorsparsesubset
    -mlperrorsubset
    -mlpgetinputscaling
    -mlpgetinputscount
    -mlpgetlayerscount
    -mlpgetlayersize
    -mlpgetneuroninfo
    -mlpgetoutputscaling
    -mlpgetoutputscount
    -mlpgetweight
    -mlpgetweightscount
    -mlpgrad
    -mlpgradbatch
    -mlpgradbatchsparse
    -mlpgradbatchsparsesubset
    -mlpgradbatchsubset
    -mlpgradn
    -mlpgradnbatch
    -mlphessianbatch
    -mlphessiannbatch
    -mlpinitpreprocessor
    -mlpissoftmax
    -mlpprocess
    -mlpprocessi
    -mlpproperties
    -mlprandomize
    -mlprandomizefull
    -mlprelclserror
    -mlprelclserrorsparse
    -mlprmserror
    -mlprmserrorsparse
    -mlpserialize
    -mlpsetinputscaling
    -mlpsetneuroninfo
    -mlpsetoutputscaling
    -mlpsetweight
    -mlpunserialize
    +mlpcontinuetraining
    +mlpcreatetrainer
    +mlpcreatetrainercls
    +mlpebagginglbfgs
    +mlpebagginglm
    +mlpetraines
    +mlpkfoldcv
    +mlpkfoldcvlbfgs
    +mlpkfoldcvlm
    +mlpsetalgobatch
    +mlpsetcond
    +mlpsetdataset
    +mlpsetdecay
    +mlpsetsparsedataset
    +mlpstarttraining
    +mlptrainensemblees
    +mlptraines
    +mlptrainlbfgs
    +mlptrainlm
    +mlptrainnetwork
    + + + + + + + +
    nn_cls2 Binary classification problem
    nn_cls3 Multiclass classification problem
    nn_crossvalidation Cross-validation
    nn_ensembles_es Early stopping ensembles
    nn_parallel Parallel training
    nn_regr Regression problem with one output (2=>1)
    nn_regr_n Regression problem with multiple outputs (2=>2)
    nn_trainerobject Advanced example on trainer object
    - +
     
    /************************************************************************* -Model's errors: +Cross-validation estimates of generalization error +*************************************************************************/ +
    class mlpcvreport +{ + double relclserror; + double avgce; + double rmserror; + double avgerror; + double avgrelerror; +}; + +
    + +
    +
/*************************************************************************
+Training report:
 * RelCLSError - fraction of misclassified cases.
 * AvgCE - average cross-entropy
 * RMSError - root-mean-square error
 * AvgError - average error
 * AvgRelError - average relative error
+ * NGrad - number of gradient calculations
+ * NHess - number of Hessian calculations
+ * NCholesky - number of Cholesky decompositions

NOTE 1: RelCLSError/AvgCE are zero on regression problems.

NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain
errors in prediction of posterior probabilities
*************************************************************************/
-
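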
    class modelerrors +
    class mlpreport { double relclserror; double avgce; double rmserror; double avgerror; double avgrelerror; + ae_int_t ngrad; + ae_int_t nhess; + ae_int_t ncholesky; };
    - +
     
    /************************************************************************* +Trainer object for neural network. +You should not try to access fields of this object directly - use ALGLIB +functions to work with this object. *************************************************************************/ -
    class multilayerperceptron +
    class mlptrainer { };
    - +
     
    /************************************************************************* -Neural network activation function +IMPORTANT: this is an "expert" version of the MLPTrain() function. We do + not recommend you to use it unless you are pretty sure that you + need ability to monitor training progress. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +This function performs step-by-step training of the neural network. Here +"step-by-step" means that training starts with MLPStartTraining() call, +and then user subsequently calls MLPContinueTraining() to perform one more +iteration of the training. + +This function performs one more iteration of the training and returns +either True (training continues) or False (training stopped). In case True +was returned, Network weights are updated according to the current state +of the optimization progress. In case False was returned, no additional +updates is performed (previous update of the network weights moved us to +the final point, and no additional updates is needed). + +EXAMPLE: + > + > [initialize network and trainer object] + > + > MLPStartTraining(Trainer, Network, True) + > while MLPContinueTraining(Trainer, Network) do + > [visualize training progress] + > INPUT PARAMETERS: - NET - neuron input - K - function index (zero for linear function) + S - trainer object + Network - neural network structure, which is used to store + current state of the training process. OUTPUT PARAMETERS: - F - function - DF - its derivative - D2F - its second derivative + Network - weights of the neural network are rewritten by the + current approximation. + +NOTE: this method uses sum-of-squares error function for training. + +NOTE: it is expected that trainer object settings are NOT changed during + step-by-step training, i.e. no one changes stopping criteria or + training set during training. It is possible and there is no defense + against such actions, but algorithm behavior in such cases is + undefined and can be unpredictable. + +NOTE: It is expected that Network is the same one which was passed to + MLPStartTraining() function. However, THIS function checks only + following: + * that number of network inputs is consistent with trainer object + settings + * that number of network outputs/classes is consistent with trainer + object settings + * that number of network weights is the same as number of weights in + the network passed to MLPStartTraining() function + Exception is thrown when these conditions are violated. + + It is also expected that you do not change state of the network on + your own - the only party who has right to change network during its + training is a trainer object. Any attempt to interfere with trainer + may lead to unpredictable results. + + + -- ALGLIB -- + Copyright 23.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    bool alglib::mlpcontinuetraining( + mlptrainer s, + multilayerperceptron network, + const xparams _params = alglib::xdefault); + +
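A C++ rendering of the pseudocode above (a sketch only; mlpsetdataset(), mlpstarttraining() and mlpcreate1() are taken from the function index of this unit, and the parameter lists used here are assumptions to be checked against their own manual entries):

    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // toy regression set: rows are [x0, x1, target]
        real_2d_array xy = "[[0,0,0],[1,0,1],[0,1,1],[1,1,2]]";

        mlptrainer trn;
        mlpcreatetrainer(2, 1, trn);          // 2 inputs, 1 output
        mlpsetdataset(trn, xy, 4);            // assumed signature: trainer, dataset, npoints

        multilayerperceptron net;
        mlpcreate1(2, 5, 1, net);             // network whose weights the trainer will update

        mlpstarttraining(trn, net, true);     // true = randomize weights first (assumed flag)
        while( mlpcontinuetraining(trn, net) )
        {
            // one more optimizer iteration has been applied to net;
            // training progress could be inspected or visualized here
        }
        return 0;
    }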
    + +
    +
    /************************************************************************* +Creation of the network trainer object for regression networks + +INPUT PARAMETERS: + NIn - number of inputs, NIn>=1 + NOut - number of outputs, NOut>=1 + +OUTPUT PARAMETERS: + S - neural network trainer object. + This structure can be used to train any regression + network with NIn inputs and NOut outputs. + + -- ALGLIB -- + Copyright 23.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreatetrainer( + ae_int_t nin, + ae_int_t nout, + mlptrainer& s, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  [3]  [4]  [5]  [6]  
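An end-to-end trainer sketch (not an upstream example; mlpsetdataset(), mlptrainnetwork(), mlpcreate1() and mlpprocess() appear in this unit's function index, but the exact signatures used here are assumptions):

    #include "dataanalysis.h"
    #include <stdio.h>

    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[0,0,0],[1,0,1],[0,1,1],[1,1,2]]";   // rows: [x0, x1, target]

        mlptrainer trn;
        mlpcreatetrainer(2, 1, trn);          // regression trainer: 2 inputs, 1 output
        mlpsetdataset(trn, xy, 4);            // assumed signature

        multilayerperceptron net;
        mlpcreate1(2, 5, 1, net);

        mlpreport rep;
        mlptrainnetwork(trn, net, 3, rep);    // 3 random restarts, best network kept (assumed signature)

        real_1d_array x = "[1,1]", y;
        mlpprocess(net, x, y);                // single-network inference on the trained net
        printf("%s\n", y.tostring(2).c_str());
        return 0;
    }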

    + +
    +
    /************************************************************************* +Creation of the network trainer object for classification networks + +INPUT PARAMETERS: + NIn - number of inputs, NIn>=1 + NClasses - number of classes, NClasses>=2 + +OUTPUT PARAMETERS: + S - neural network trainer object. + This structure can be used to train any classification + network with NIn inputs and NOut outputs. + + -- ALGLIB -- + Copyright 23.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpcreatetrainercls( + ae_int_t nin, + ae_int_t nclasses, + mlptrainer& s, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  [2]  
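The classification variant differs only in how the network and the dataset are laid out; a sketch (mlpcreatec1(), mlpsetdataset(), mlptrainnetwork() and mlpprocess() are assumptions taken from the function indexes; the last dataset column holds the class index 0..NClasses-1):

    #include "dataanalysis.h"
    #include <stdio.h>

    using namespace alglib;

    int main()
    {
        // rows: [x0, x1, class], classes are 0 or 1
        real_2d_array xy = "[[0,0,0],[1,1,0],[0,1,1],[1,0,1]]";

        mlptrainer trn;
        mlpcreatetrainercls(2, 2, trn);       // 2 inputs, 2 classes
        mlpsetdataset(trn, xy, 4);            // assumed signature

        multilayerperceptron net;
        mlpcreatec1(2, 4, 2, net);            // classifier network with SOFTMAX output (assumed)

        mlpreport rep;
        mlptrainnetwork(trn, net, 2, rep);    // assumed signature

        real_1d_array x = "[0,1]", y;
        mlpprocess(net, x, y);                // y holds posterior probabilities of the 2 classes
        printf("%s\n", y.tostring(3).c_str());
        return 0;
    }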

    + +
    +
    /************************************************************************* +Training neural networks ensemble using bootstrap aggregating (bagging). +L-BFGS algorithm is used as base training method. + +INPUT PARAMETERS: + Ensemble - model with initialized geometry + XY - training set + NPoints - training set size + Decay - weight decay coefficient, >=0.001 + Restarts - restarts, >0. + WStep - stopping criterion, same as in MLPTrainLBFGS + MaxIts - stopping criterion, same as in MLPTrainLBFGS + +OUTPUT PARAMETERS: + Ensemble - trained model + Info - return code: + * -8, if both WStep=0 and MaxIts=0 + * -2, if there is a point with class number + outside of [0..NClasses-1]. + * -1, if incorrect parameters was passed + (NPoints<0, Restarts<1). + * 2, if task has been solved. + Rep - training report. + OOBErrors - out-of-bag generalization error estimate + + -- ALGLIB -- + Copyright 17.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpebagginglbfgs( + mlpensemble ensemble, + real_2d_array xy, + ae_int_t npoints, + double decay, + ae_int_t restarts, + double wstep, + ae_int_t maxits, + ae_int_t& info, + mlpreport& rep, + mlpcvreport& ooberrors, + const xparams _params = alglib::xdefault); + +
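A sketch for the bagging routine above (parameter values are illustrative; the out-of-bag report uses the mlpcvreport structure defined earlier in this section):

    #include "dataanalysis.h"
    #include <stdio.h>

    using namespace alglib;

    int main()
    {
        // rows: [x0, x1, target]
        real_2d_array xy = "[[0,0,0],[1,0,1],[0,1,1],[1,1,2],[2,1,3],[1,2,3],[2,2,4],[3,1,4]]";

        mlpensemble ensemble;
        mlpecreate1(2, 5, 1, 10, ensemble);   // 10-member regression ensemble

        ae_int_t info;
        mlpreport rep;
        mlpcvreport ooberrors;
        mlpebagginglbfgs(ensemble, xy, 8, 0.001, 2, 0.001, 0, info, rep, ooberrors);
        printf("info=%d  oob rms=%.4f\n", int(info), ooberrors.rmserror);
        return 0;
    }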
    + +
    +
    /************************************************************************* +Training neural networks ensemble using bootstrap aggregating (bagging). +Modified Levenberg-Marquardt algorithm is used as base training method. + +INPUT PARAMETERS: + Ensemble - model with initialized geometry + XY - training set + NPoints - training set size + Decay - weight decay coefficient, >=0.001 + Restarts - restarts, >0. + +OUTPUT PARAMETERS: + Ensemble - trained model + Info - return code: + * -2, if there is a point with class number + outside of [0..NClasses-1]. + * -1, if incorrect parameters was passed + (NPoints<0, Restarts<1). + * 2, if task has been solved. + Rep - training report. + OOBErrors - out-of-bag generalization error estimate + + -- ALGLIB -- + Copyright 17.02.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::mlpebagginglm( + mlpensemble ensemble, + real_2d_array xy, + ae_int_t npoints, + double decay, + ae_int_t restarts, + ae_int_t& info, + mlpreport& rep, + mlpcvreport& ooberrors, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +Training neural networks ensemble using early stopping. + +INPUT PARAMETERS: + Ensemble - model with initialized geometry + XY - training set + NPoints - training set size + Decay - weight decay coefficient, >=0.001 + Restarts - restarts, >0. + +OUTPUT PARAMETERS: + Ensemble - trained model + Info - return code: + * -2, if there is a point with class number + outside of [0..NClasses-1]. + * -1, if incorrect parameters was passed + (NPoints<0, Restarts<1). + * 6, if task has been solved. + Rep - training report. + OOBErrors - out-of-bag generalization error estimate -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpactivationfunction( - double net, - ae_int_t k, - double& f, - double& df, - double& d2f); +
    void alglib::mlpetraines( + mlpensemble ensemble, + real_2d_array xy, + ae_int_t npoints, + double decay, + ae_int_t restarts, + ae_int_t& info, + mlpreport& rep, + const xparams _params = alglib::xdefault);
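A sketch for mlpetraines() (dataset and parameter values are illustrative only):

    #include "dataanalysis.h"
    #include <stdio.h>

    using namespace alglib;

    int main()
    {
        // rows: [x0, x1, target]
        real_2d_array xy = "[[0,0,0],[1,0,1],[0,1,1],[1,1,2],[2,1,3],[1,2,3],[2,2,4],[3,1,4]]";

        mlpensemble ensemble;
        mlpecreate1(2, 5, 1, 10, ensemble);   // 10-member regression ensemble

        ae_int_t info;
        mlpreport rep;
        mlpetraines(ensemble, xy, 8, 0.001, 2, info, rep);
        printf("info=%d  training-set rms=%.4f\n",
               int(info), mlpermserror(ensemble, xy, 8));
        return 0;
    }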
    - +
     
    /************************************************************************* -Calculation of all types of errors on subset of dataset. - -FOR USERS OF COMMERCIAL EDITION: +This function estimates generalization error using cross-validation on the +current dataset with current training settings. - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset given by sparse matrix; - one sample = one row; - first NIn columns contain inputs, - next NOut columns - desired outputs. - SetSize - real size of XY, SetSize>=0; - Subset - subset of SubsetSize elements, array[SubsetSize]; - SubsetSize- number of elements in Subset[] array: - * if SubsetSize>0, rows of XY with indices Subset[0]... - ...Subset[SubsetSize-1] are processed - * if SubsetSize=0, zeros are returned - * if SubsetSize<0, entire dataset is processed; Subset[] - array is ignored in this case. + S - trainer object + Network - neural network. It must have same number of inputs and + output/classes as was specified during creation of the + trainer object. Network is not changed during cross- + validation and is not trained - it is used only as + representative of its architecture. I.e., we estimate + generalization properties of ARCHITECTURE, not some + specific network. + NRestarts - number of restarts, >=0: + * NRestarts>0 means that for each cross-validation + round specified number of random restarts is + performed, with best network being chosen after + training. + * NRestarts=0 is same as NRestarts=1 + FoldsCount - number of folds in k-fold cross-validation: + * 2<=FoldsCount<=size of dataset + * recommended value: 10. + * values larger than dataset size will be silently + truncated down to dataset size OUTPUT PARAMETERS: - Rep - it contains all type of errors. + Rep - structure which contains cross-validation estimates: + * Rep.RelCLSError - fraction of misclassified cases. 
+ * Rep.AvgCE - average cross-entropy
+ * Rep.RMSError - root-mean-square error
+ * Rep.AvgError - average error
+ * Rep.AvgRelError - average relative error
+
+NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
+ or subset with only one point was given, zeros are returned as
+ estimates.
+
+NOTE: this method performs FoldsCount cross-validation rounds, each one
+ with NRestarts random starts. Thus, FoldsCount*NRestarts networks
+ are trained in total.
+
+NOTE: Rep.RelCLSError/Rep.AvgCE are zero on regression problems.
+NOTE: on classification problems Rep.RMSError/Rep.AvgError/Rep.AvgRelError
+ contain errors in prediction of posterior probabilities.

 -- ALGLIB --
- Copyright 04.09.2012 by Bochkanov Sergey
+ Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::mlpallerrorssparsesubset( - multilayerperceptron network, - sparsematrix xy, - ae_int_t setsize, - integer_1d_array subset, - ae_int_t subsetsize, - modelerrors& rep); -void alglib::smp_mlpallerrorssparsesubset( +
    void alglib::mlpkfoldcv( + mlptrainer s, multilayerperceptron network, - sparsematrix xy, - ae_int_t setsize, - integer_1d_array subset, - ae_int_t subsetsize, - modelerrors& rep); + ae_int_t nrestarts, + ae_int_t foldscount, + mlpreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  
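A cross-validation sketch (mlpsetdataset() and mlpcreate1() signatures are assumptions taken from the function indexes; the network is used only as a representative of its architecture, as the description above notes):

    #include "dataanalysis.h"
    #include <stdio.h>

    using namespace alglib;

    int main()
    {
        // rows: [x0, x1, target]
        real_2d_array xy = "[[0,0,0],[1,0,1],[0,1,1],[1,1,2],[2,1,3],[1,2,3],[2,2,4],[3,1,4]]";

        mlptrainer trn;
        mlpcreatetrainer(2, 1, trn);
        mlpsetdataset(trn, xy, 8);            // assumed signature

        multilayerperceptron net;             // architecture only; its weights are not reused
        mlpcreate1(2, 4, 1, net);

        mlpreport rep;
        mlpkfoldcv(trn, net, 2, 4, rep);      // 2 restarts per round, 4 folds
        printf("cv rms = %.4f\n", rep.rmserror);
        return 0;
    }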

    +
     
    /************************************************************************* -Calculation of all types of errors on subset of dataset. - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +Cross-validation estimate of generalization error. +Base algorithm - L-BFGS. INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset; one sample = one row; - first NIn columns contain inputs, - next NOut columns - desired outputs. - SetSize - real size of XY, SetSize>=0; - Subset - subset of SubsetSize elements, array[SubsetSize]; - SubsetSize- number of elements in Subset[] array: - * if SubsetSize>0, rows of XY with indices Subset[0]... - ...Subset[SubsetSize-1] are processed - * if SubsetSize=0, zeros are returned - * if SubsetSize<0, entire dataset is processed; Subset[] - array is ignored in this case. + Network - neural network with initialized geometry. Network is + not changed during cross-validation - it is used only + as a representative of its architecture. + XY - training set. + SSize - training set size + Decay - weight decay, same as in MLPTrainLBFGS + Restarts - number of restarts, >0. + restarts are counted for each partition separately, so + total number of restarts will be Restarts*FoldsCount. + WStep - stopping criterion, same as in MLPTrainLBFGS + MaxIts - stopping criterion, same as in MLPTrainLBFGS + FoldsCount - number of folds in k-fold cross-validation, + 2<=FoldsCount<=SSize. + recommended value: 10. OUTPUT PARAMETERS: - Rep - it contains all type of errors. + Info - return code, same as in MLPTrainLBFGS + Rep - report, same as in MLPTrainLM/MLPTrainLBFGS + CVRep - generalization error estimates -- ALGLIB -- - Copyright 04.09.2012 by Bochkanov Sergey + Copyright 09.12.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpallerrorssubset( - multilayerperceptron network, - real_2d_array xy, - ae_int_t setsize, - integer_1d_array subset, - ae_int_t subsetsize, - modelerrors& rep); -void alglib::smp_mlpallerrorssubset( +
    void alglib::mlpkfoldcvlbfgs( multilayerperceptron network, real_2d_array xy, - ae_int_t setsize, - integer_1d_array subset, - ae_int_t subsetsize, - modelerrors& rep); + ae_int_t npoints, + double decay, + ae_int_t restarts, + double wstep, + ae_int_t maxits, + ae_int_t foldscount, + ae_int_t& info, + mlpreport& rep, + mlpcvreport& cvrep, + const xparams _params = alglib::xdefault);
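A minimal calling sketch for the declaration above; it is not part of the upstream manual. The toy 1D dataset is the one used by the examples further below, and the Decay/Restarts/WStep/FoldsCount values are illustrative choices following the recommendations in the comment.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Architecture to be evaluated by k-fold CV (its current weights are not reused)
    multilayerperceptron network;
    mlpcreate1(1, 4, 1, network);

    // Training set: f(x)=1/(x^2+1), one row = [x, f(x)]
    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[-1.3,0.4],[-1,0.5],[-0.6,0.7],[-0.3,0.9],[0,1],[2.0,0.2],[1.6,0.3],[1.3,0.4],[1,0.5],[0.6,0.7],[0.3,0.9]]";

    ae_int_t info;
    mlpreport rep;
    mlpcvreport cvrep;

    // Decay=0.001, 2 restarts per fold, WStep=0.01, MaxIts=0 (stop on small step), 5 folds
    mlpkfoldcvlbfgs(network, xy, 13, 0.001, 2, 0.01, 0, 5, info, rep, cvrep);
    printf("info=%d\n", int(info)); // 2 is expected on success, per the codes above
    return 0;
}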
    - +
     
    /************************************************************************* -Average cross-entropy (in bits per element) on the test set. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +Cross-validation estimate of generalization error. +Base algorithm - Levenberg-Marquardt. INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. - -RESULT: -CrossEntropy/(NPoints*LN(2)). -Zero if network solves regression task. - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. - -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs + Network - neural network with initialized geometry. Network is + not changed during cross-validation - it is used only + as a representative of its architecture. + XY - training set. + SSize - training set size + Decay - weight decay, same as in MLPTrainLBFGS + Restarts - number of restarts, >0. + restarts are counted for each partition separately, so + total number of restarts will be Restarts*FoldsCount. + FoldsCount - number of folds in k-fold cross-validation, + 2<=FoldsCount<=SSize. + recommended value: 10. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +OUTPUT PARAMETERS: + Info - return code, same as in MLPTrainLBFGS + Rep - report, same as in MLPTrainLM/MLPTrainLBFGS + CVRep - generalization error estimates -- ALGLIB -- - Copyright 08.01.2009 by Bochkanov Sergey + Copyright 09.12.2007 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlpavgce( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); -double alglib::smp_mlpavgce( +
    void alglib::mlpkfoldcvlm( multilayerperceptron network, real_2d_array xy, - ae_int_t npoints); + ae_int_t npoints, + double decay, + ae_int_t restarts, + ae_int_t foldscount, + ae_int_t& info, + mlpreport& rep, + mlpcvreport& cvrep, + const xparams _params = alglib::xdefault);
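The Levenberg-Marquardt variant is called the same way, minus the step-size criteria; a sketch under the same assumptions as the L-BFGS one above:

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    multilayerperceptron network;
    mlpcreate1(1, 4, 1, network);
    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[-1.3,0.4],[-1,0.5],[-0.6,0.7],[-0.3,0.9],[0,1],[2.0,0.2],[1.6,0.3],[1.3,0.4],[1,0.5],[0.6,0.7],[0.3,0.9]]";

    ae_int_t info;
    mlpreport rep;
    mlpcvreport cvrep;

    // Decay=0.001, 2 restarts per fold, 5 folds
    mlpkfoldcvlm(network, xy, 13, 0.001, 2, 5, info, rep, cvrep);
    printf("info=%d\n", int(info));
    return 0;
}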
    - +
     
    /************************************************************************* -Average cross-entropy (in bits per element) on the test set given by -sparse matrix. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +This function sets training algorithm: batch training using L-BFGS will be +used. +This algorithm: +* the most robust for small-scale problems, but may be too slow for large + scale ones. +* perfoms full pass through the dataset before performing step +* uses conditions specified by MLPSetCond() for stopping +* is default one used by trainer object INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. - NPoints - points count, >=0. - -RESULT: -CrossEntropy/(NPoints*LN(2)). -Zero if network solves regression task. - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. - -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs - -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). + S - trainer object -- ALGLIB -- - Copyright 9.08.2012 by Bochkanov Sergey + Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlpavgcesparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); -double alglib::smp_mlpavgcesparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); +
    void alglib::mlpsetalgobatch( + mlptrainer s, + const xparams _params = alglib::xdefault);
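Batch L-BFGS is already the default algorithm of a freshly created trainer, so calling mlpsetalgobatch() only makes the choice explicit. A short sketch, with an illustrative dataset and network size:

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    mlptrainer trn;
    multilayerperceptron network;
    mlpreport rep;
    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[0,1],[1.6,0.3],[2.0,0.2]]";

    mlpcreatetrainer(1, 1, trn);
    mlpsetdataset(trn, xy, 5);
    mlpsetalgobatch(trn);            // explicit, although it is the default anyway
    mlpcreate1(1, 3, 1, network);
    mlptrainnetwork(trn, network, 2, rep);
    return 0;
}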
    - +
     
    /************************************************************************* -Average absolute error on the test set. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - +This function sets stopping criteria for the optimizer. INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. - -RESULT: -Its meaning for regression task is obvious. As for classification task, it -means average error when estimating posterior probabilities. - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. + S - trainer object + WStep - stopping criterion. Algorithm stops if step size is + less than WStep. Recommended value - 0.01. Zero step + size means stopping after MaxIts iterations. + WStep>=0. + MaxIts - stopping criterion. Algorithm stops after MaxIts + epochs (full passes over entire dataset). Zero MaxIts + means stopping when step is sufficiently small. + MaxIts>=0. -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +NOTE: by default, WStep=0.005 and MaxIts=0 are used. These values are also + used when MLPSetCond() is called with WStep=0 and MaxIts=0. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +NOTE: these stopping criteria are used for all kinds of neural training - + from "conventional" networks to early stopping ensembles. When used + for "conventional" networks, they are used as the only stopping + criteria. When combined with early stopping, they used as ADDITIONAL + stopping criteria which can terminate early stopping algorithm. -- ALGLIB -- - Copyright 11.03.2008 by Bochkanov Sergey + Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlpavgerror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); -double alglib::smp_mlpavgerror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::mlpsetcond( + mlptrainer s, + double wstep, + ae_int_t maxits, + const xparams _params = alglib::xdefault);
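A sketch of configuring both stopping criteria before training; the dataset, the network size and the 0.01/100 thresholds are illustrative assumptions:

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    mlptrainer trn;
    multilayerperceptron network;
    mlpreport rep;
    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[0,1],[1.6,0.3],[2.0,0.2]]";

    mlpcreatetrainer(1, 1, trn);
    mlpsetdataset(trn, xy, 5);

    // Stop when the step drops below 0.01 or after 100 epochs, whichever happens first
    mlpsetcond(trn, 0.01, 100);

    mlpcreate1(1, 3, 1, network);
    mlptrainnetwork(trn, network, 2, rep);
    return 0;
}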
    - +
     
    /************************************************************************* -Average absolute error on the test set given by sparse matrix. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - +This function sets "current dataset" of the trainer object to one passed +by user. INPUT PARAMETERS: - Network - neural network; + S - trainer object XY - training set, see below for information on the training set format. This function checks correctness of the dataset (no NANs/INFs, class numbers are correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. + is passed. NPoints - points count, >=0. -RESULT: -Its meaning for regression task is obvious. As for classification task, it -means average error when estimating posterior probabilities. - DATASET FORMAT: This function uses two different dataset formats - one for regression @@ -30371,147 +41281,62 @@ * first NIn columns are inputs, next NOut columns are outputs For classification networks with NIn inputs and NClasses clases following -dataset format is used: +datasetformat is used: * dataset is given by NPoints*(NIn+1) matrix * each row corresponds to one example * first NIn columns are inputs, last column stores class number (from 0 to NClasses-1). -- ALGLIB -- - Copyright 09.08.2012 by Bochkanov Sergey + Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlpavgerrorsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); -double alglib::smp_mlpavgerrorsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); +
    void alglib::mlpsetdataset( + mlptrainer s, + real_2d_array xy, + ae_int_t npoints, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  [3]  [4]  [5]  [6]  [7]  [8]  
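To make the two dataset layouts described above concrete, a small sketch follows (not part of the upstream manual; the values are illustrative, and mlpcreatetrainercls() is the classification counterpart used in the examples below):

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Regression trainer with NIn=2, NOut=1: each row is [in0, in1, out0]
    mlptrainer rtrn;
    mlpcreatetrainer(2, 1, rtrn);
    real_2d_array rxy = "[[0,0,0],[0,1,1],[1,0,1],[1,1,0]]";
    mlpsetdataset(rtrn, rxy, 4);

    // Classification trainer with NIn=1, NClasses=2: each row is [in0, class],
    // with the class number (0..NClasses-1) stored in the last column
    mlptrainer ctrn;
    mlpcreatetrainercls(1, 2, ctrn);
    real_2d_array cxy = "[[+1,0],[+2,0],[-1,1],[-2,1]]";
    mlpsetdataset(ctrn, cxy, 4);
    return 0;
}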

    +
     
    /************************************************************************* -Average relative error on the test set. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - +This function sets weight decay coefficient which is used for training. INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. - -RESULT: -Its meaning for regression task is obvious. As for classification task, it -means average relative error when estimating posterior probability of -belonging to the correct class. - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. - -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs + S - trainer object + Decay - weight decay coefficient, >=0. Weight decay term + 'Decay*||Weights||^2' is added to error function. If + you don't know what Decay to choose, use 1.0E-3. + Weight decay can be set to zero, in this case network + is trained without weight decay. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +NOTE: by default network uses some small nonzero value for weight decay. -- ALGLIB -- - Copyright 11.03.2008 by Bochkanov Sergey + Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlpavgrelerror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); -double alglib::smp_mlpavgrelerror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::mlpsetdecay( + mlptrainer s, + double decay, + const xparams _params = alglib::xdefault);
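A sketch of selecting the suggested 1.0E-3 weight decay on a trainer; dataset and network size are illustrative assumptions:

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    mlptrainer trn;
    multilayerperceptron network;
    mlpreport rep;
    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[0,1],[1.6,0.3],[2.0,0.2]]";

    mlpcreatetrainer(1, 1, trn);
    mlpsetdataset(trn, xy, 5);
    mlpsetdecay(trn, 0.001);         // the 1.0E-3 value suggested above
    mlpcreate1(1, 3, 1, network);
    mlptrainnetwork(trn, network, 2, rep);
    return 0;
}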
    - +
     
    /************************************************************************* -Average relative error on the test set given by sparse matrix. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - +This function sets "current dataset" of the trainer object to one passed +by user (sparse matrix is used to store dataset). INPUT PARAMETERS: - Network - neural network; + S - trainer object XY - training set, see below for information on the training set format. This function checks correctness of the dataset (no NANs/INFs, class numbers are correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. - NPoints - points count, >=0. - -RESULT: -Its meaning for regression task is obvious. As for classification task, it -means average relative error when estimating posterior probability of -belonging to the correct class. + is passed. Any sparse storage format can be used: + Hash-table, CRS... + NPoints - points count, >=0 DATASET FORMAT: @@ -30525,7601 +41350,8679 @@ * first NIn columns are inputs, next NOut columns are outputs For classification networks with NIn inputs and NClasses clases following -dataset format is used: +datasetformat is used: * dataset is given by NPoints*(NIn+1) matrix * each row corresponds to one example * first NIn columns are inputs, last column stores class number (from 0 to NClasses-1). -- ALGLIB -- - Copyright 09.08.2012 by Bochkanov Sergey + Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlpavgrelerrorsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); -double alglib::smp_mlpavgrelerrorsparse( - multilayerperceptron network, +
    void alglib::mlpsetsparsedataset( + mlptrainer s, sparsematrix xy, - ae_int_t npoints); + ae_int_t npoints, + const xparams _params = alglib::xdefault);
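A sketch of attaching a sparse dataset. It assumes that the sparsecreate()/sparseset() helpers of ALGLIB's sparse-matrix module are reachable through dataanalysis.h, and it uses hash-table storage, which the comment above permits; all values are illustrative:

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    mlptrainer trn;
    multilayerperceptron network;
    mlpreport rep;

    // Regression problem with NIn=2, NOut=1: each row is [in0, in1, out0]
    sparsematrix xy;
    sparsecreate(4, 3, xy);                              // 4 samples, NIn+NOut=3 columns
    // Only nonzero entries are stored; unset elements are implicit zeros
    sparseset(xy, 1, 1, 1.0); sparseset(xy, 1, 2, 1.0);  // [0,1] => 1
    sparseset(xy, 2, 0, 1.0); sparseset(xy, 2, 2, 1.0);  // [1,0] => 1
    sparseset(xy, 3, 0, 1.0); sparseset(xy, 3, 1, 1.0);  // [1,1] => 0

    mlpcreatetrainer(2, 1, trn);
    mlpsetsparsedataset(trn, xy, 4);
    mlpcreate1(2, 3, 1, network);
    mlptrainnetwork(trn, network, 2, rep);
    return 0;
}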
    - +
     
    /************************************************************************* -Classification error of the neural network on dataset. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - - -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. - -RESULT: - classification error (number of misclassified cases) - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. - -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +IMPORTANT: this is an "expert" version of the MLPTrain() function. We do + not recommend you to use it unless you are pretty sure that you + need ability to monitor training progress. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +This function performs step-by-step training of the neural network. Here +"step-by-step" means that training starts with MLPStartTraining() call, +and then user subsequently calls MLPContinueTraining() to perform one more +iteration of the training. - -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey -*************************************************************************/ -
    ae_int_t alglib::mlpclserror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); -ae_int_t alglib::smp_mlpclserror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); +After call to this function trainer object remembers network and is ready +to train it. However, no training is performed until first call to +MLPContinueTraining() function. Subsequent calls to MLPContinueTraining() +will advance training progress one iteration further. -
    - -
    -
    /************************************************************************* -Copying of neural network +EXAMPLE: + > + > ...initialize network and trainer object.... + > + > MLPStartTraining(Trainer, Network, True) + > while MLPContinueTraining(Trainer, Network) do + > ...visualize training progress... + > INPUT PARAMETERS: - Network1 - original + S - trainer object + Network - neural network. It must have same number of inputs and + output/classes as was specified during creation of the + trainer object. + RandomStart - randomize network before training or not: + * True means that network is randomized and its + initial state (one which was passed to the trainer + object) is lost. + * False means that training is started from the + current state of the network OUTPUT PARAMETERS: - Network2 - copy + Network - neural network which is ready to training (weights are + initialized, preprocessor is initialized using current + training set) + +NOTE: this method uses sum-of-squares error function for training. + +NOTE: it is expected that trainer object settings are NOT changed during + step-by-step training, i.e. no one changes stopping criteria or + training set during training. It is possible and there is no defense + against such actions, but algorithm behavior in such cases is + undefined and can be unpredictable. -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpcopy( - multilayerperceptron network1, - multilayerperceptron& network2); +
    void alglib::mlpstarttraining( + mlptrainer s, + multilayerperceptron network, + bool randomstart, + const xparams _params = alglib::xdefault);
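A sketch of the start/continue pattern shown in the EXAMPLE pseudo-code above. The C++ spelling mlpcontinuetraining(), returning bool, is assumed for MLPContinueTraining; dataset and network size are illustrative:

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    mlptrainer trn;
    multilayerperceptron network;
    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[0,1],[1.6,0.3],[2.0,0.2]]";

    mlpcreatetrainer(1, 1, trn);
    mlpsetdataset(trn, xy, 5);
    mlpcreate1(1, 3, 1, network);

    // Randomize the network and prepare the trainer, then advance the
    // optimizer one iteration at a time
    mlpstarttraining(trn, network, true);
    ae_int_t iteration = 0;
    while( mlpcontinuetraining(trn, network) )
    {
        iteration++;                 // training progress could be reported here
    }
    printf("finished after %d iterations\n", int(iteration));
    return 0;
}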
    - +
     
    /************************************************************************* -This function copies tunable parameters (weights/means/sigmas) from one -network to another with same architecture. It performs some rudimentary -checks that architectures are same, and throws exception if check fails. +This function trains neural network ensemble passed to this function using +current dataset and early stopping training algorithm. Each early stopping +round performs NRestarts random restarts (thus, EnsembleSize*NRestarts +training rounds is performed in total). -It is intended for fast copying of states between two network which are -known to have same geometry. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - Network1 - source, must be correctly initialized - Network2 - target, must have same architecture + S - trainer object; + Ensemble - neural network ensemble. It must have same number of + inputs and outputs/classes as was specified during + creation of the trainer object. + NRestarts - number of restarts, >=0: + * NRestarts>0 means that specified number of random + restarts are performed during each ES round; + * NRestarts=0 is silently replaced by 1. OUTPUT PARAMETERS: - Network2 - network state is copied from source to target - - -- ALGLIB -- - Copyright 20.06.2013 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpcopytunableparameters( - multilayerperceptron network1, - multilayerperceptron network2); - -
    - -
    -
    /************************************************************************* -Creates neural network with NIn inputs, NOut outputs, without hidden -layers, with linear output layer. Network weights are filled with small -random values. - - -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpcreate0( - ae_int_t nin, - ae_int_t nout, - multilayerperceptron& network); + Ensemble - trained ensemble; + Rep - it contains all type of errors. -
    - -
    -
    /************************************************************************* -Same as MLPCreate0, but with one hidden layer (NHid neurons) with -non-linear activation function. Output layer is linear. +NOTE: this training method uses BOTH early stopping and weight decay! So, + you should select weight decay before starting training just as you + select it before training "conventional" networks. - -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpcreate1( - ae_int_t nin, - ae_int_t nhid, - ae_int_t nout, - multilayerperceptron& network); +NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(), + or single-point dataset was passed, ensemble is filled by zero + values. -
    - -
    -
    /************************************************************************* -Same as MLPCreate0, but with two hidden layers (NHid1 and NHid2 neurons) -with non-linear activation function. Output layer is linear. - $ALL +NOTE: this method uses sum-of-squares error function for training. -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 22.08.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpcreate2( - ae_int_t nin, - ae_int_t nhid1, - ae_int_t nhid2, - ae_int_t nout, - multilayerperceptron& network); +
    void alglib::mlptrainensemblees( + mlptrainer s, + mlpensemble ensemble, + ae_int_t nrestarts, + mlpreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Creates neural network with NIn inputs, NOut outputs, without hidden -layers with non-linear output layer. Network weights are filled with small -random values. - -Activation function of the output layer takes values: +Neural network training using early stopping (base algorithm - L-BFGS with +regularization). - (B, +INF), if D>=0 +INPUT PARAMETERS: + Network - neural network with initialized geometry + TrnXY - training set + TrnSize - training set size, TrnSize>0 + ValXY - validation set + ValSize - validation set size, ValSize>0 + Decay - weight decay constant, >=0.001 + Decay term 'Decay*||Weights||^2' is added to error + function. + If you don't know what Decay to choose, use 0.001. + Restarts - number of restarts, either: + * strictly positive number - algorithm make specified + number of restarts from random position. + * -1, in which case algorithm makes exactly one run + from the initial state of the network (no randomization). + If you don't know what Restarts to choose, choose one + one the following: + * -1 (deterministic start) + * +1 (one random restart) + * +5 (moderate amount of random restarts) -or +OUTPUT PARAMETERS: + Network - trained neural network. + Info - return code: + * -2, if there is a point with class number + outside of [0..NOut-1]. + * -1, if wrong parameters specified + (NPoints<0, Restarts<1, ...). + * 2, task has been solved, stopping criterion met - + sufficiently small step size. Not expected (we + use EARLY stopping) but possible and not an + error. + * 6, task has been solved, stopping criterion met - + increasing of validation set error. + Rep - training report - (-INF, B), if D<0. +NOTE: +Algorithm stops if validation set error increases for a long enough or +step size is small enought (there are task where validation set may +decrease for eternity). In any case solution returned corresponds to the +minimum of validation set error. -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey + Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpcreateb0( - ae_int_t nin, - ae_int_t nout, - double b, - double d, - multilayerperceptron& network); +
    void alglib::mlptraines( + multilayerperceptron network, + real_2d_array trnxy, + ae_int_t trnsize, + real_2d_array valxy, + ae_int_t valsize, + double decay, + ae_int_t restarts, + ae_int_t& info, + mlpreport& rep, + const xparams _params = alglib::xdefault);
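A sketch of early-stopping training with a hand-made training/validation split; the split and the parameter values are illustrative assumptions:

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    multilayerperceptron network;
    mlpreport rep;
    ae_int_t info;

    // f(x)=1/(x^2+1), split by hand into training and validation parts
    real_2d_array trnxy = "[[-2.0,0.2],[-1.3,0.4],[-0.6,0.7],[0,1],[0.6,0.7],[1.3,0.4],[2.0,0.2]]";
    real_2d_array valxy = "[[-1.6,0.3],[-0.3,0.9],[0.3,0.9],[1.6,0.3]]";

    mlpcreate1(1, 4, 1, network);

    // Decay=0.001 as recommended above, 3 random restarts
    mlptraines(network, trnxy, 7, valxy, 4, 0.001, 3, info, rep);
    printf("info=%d\n", int(info)); // 2 or 6 indicate success, per the codes above
    return 0;
}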
    - +
     
    /************************************************************************* -Same as MLPCreateB0 but with non-linear hidden layer. +Neural network training using L-BFGS algorithm with regularization. +Subroutine trains neural network with restarts from random positions. +Algorithm is well suited for problems of any dimensionality (memory +requirements and step complexity are linear by weights number). - -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpcreateb1( - ae_int_t nin, - ae_int_t nhid, - ae_int_t nout, - double b, - double d, - multilayerperceptron& network); +INPUT PARAMETERS: + Network - neural network with initialized geometry + XY - training set + NPoints - training set size + Decay - weight decay constant, >=0.001 + Decay term 'Decay*||Weights||^2' is added to error + function. + If you don't know what Decay to choose, use 0.001. + Restarts - number of restarts from random position, >0. + If you don't know what Restarts to choose, use 2. + WStep - stopping criterion. Algorithm stops if step size is + less than WStep. Recommended value - 0.01. Zero step + size means stopping after MaxIts iterations. + MaxIts - stopping criterion. Algorithm stops after MaxIts + iterations (NOT gradient calculations). Zero MaxIts + means stopping when step is sufficiently small. -
    - -
    -
    /************************************************************************* -Same as MLPCreateB0 but with two non-linear hidden layers. +OUTPUT PARAMETERS: + Network - trained neural network. + Info - return code: + * -8, if both WStep=0 and MaxIts=0 + * -2, if there is a point with class number + outside of [0..NOut-1]. + * -1, if wrong parameters specified + (NPoints<0, Restarts<1). + * 2, if task has been solved. + Rep - training report -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey + Copyright 09.12.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpcreateb2( - ae_int_t nin, - ae_int_t nhid1, - ae_int_t nhid2, - ae_int_t nout, - double b, - double d, - multilayerperceptron& network); +
    void alglib::mlptrainlbfgs( + multilayerperceptron network, + real_2d_array xy, + ae_int_t npoints, + double decay, + ae_int_t restarts, + double wstep, + ae_int_t maxits, + ae_int_t& info, + mlpreport& rep, + const xparams _params = alglib::xdefault);
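A calling sketch for the declaration above, using the Decay=0.001, Restarts=2 and WStep=0.01 values recommended in the comment; the dataset is the toy 1D sample from the examples below:

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    multilayerperceptron network;
    mlpreport rep;
    ae_int_t info;
    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[-1.3,0.4],[-1,0.5],[-0.6,0.7],[-0.3,0.9],[0,1],[2.0,0.2],[1.6,0.3],[1.3,0.4],[1,0.5],[0.6,0.7],[0.3,0.9]]";

    mlpcreate1(1, 4, 1, network);
    mlptrainlbfgs(network, xy, 13, 0.001, 2, 0.01, 0, info, rep);
    printf("info=%d\n", int(info)); // 2 means the task has been solved

    real_1d_array x = "[0]";
    real_1d_array y = "[0]";
    mlpprocess(network, x, y);
    printf("%s\n", y.tostring(2).c_str()); // should be close to f(0)=1
    return 0;
}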
    - +
     
    /************************************************************************* -Creates classifier network with NIn inputs and NOut possible classes. -Network contains no hidden layers and linear output layer with SOFTMAX- -normalization (so outputs sums up to 1.0 and converge to posterior -probabilities). +Neural network training using modified Levenberg-Marquardt with exact +Hessian calculation and regularization. Subroutine trains neural network +with restarts from random positions. Algorithm is well suited for small +and medium scale problems (hundreds of weights). - -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpcreatec0( - ae_int_t nin, - ae_int_t nout, - multilayerperceptron& network); +INPUT PARAMETERS: + Network - neural network with initialized geometry + XY - training set + NPoints - training set size + Decay - weight decay constant, >=0.001 + Decay term 'Decay*||Weights||^2' is added to error + function. + If you don't know what Decay to choose, use 0.001. + Restarts - number of restarts from random position, >0. + If you don't know what Restarts to choose, use 2. -
    - -
    -
    /************************************************************************* -Same as MLPCreateC0, but with one non-linear hidden layer. +OUTPUT PARAMETERS: + Network - trained neural network. + Info - return code: + * -9, if internal matrix inverse subroutine failed + * -2, if there is a point with class number + outside of [0..NOut-1]. + * -1, if wrong parameters specified + (NPoints<0, Restarts<1). + * 2, if task has been solved. + Rep - training report -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpcreatec1( - ae_int_t nin, - ae_int_t nhid, - ae_int_t nout, - multilayerperceptron& network); +
    void alglib::mlptrainlm( + multilayerperceptron network, + real_2d_array xy, + ae_int_t npoints, + double decay, + ae_int_t restarts, + ae_int_t& info, + mlpreport& rep, + const xparams _params = alglib::xdefault);
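A calling sketch for mlptrainlm() under the same assumptions as the L-BFGS sketch above; Levenberg-Marquardt needs no step-size criteria:

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    multilayerperceptron network;
    mlpreport rep;
    ae_int_t info;
    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[-1.3,0.4],[-1,0.5],[-0.6,0.7],[-0.3,0.9],[0,1],[2.0,0.2],[1.6,0.3],[1.3,0.4],[1,0.5],[0.6,0.7],[0.3,0.9]]";

    mlpcreate1(1, 4, 1, network);
    mlptrainlm(network, xy, 13, 0.001, 2, info, rep);   // Decay=0.001, 2 restarts
    printf("info=%d\n", int(info)); // 2 means the task has been solved
    return 0;
}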
    - +
     
    /************************************************************************* -Same as MLPCreateC0, but with two non-linear hidden layers. +This function trains neural network passed to this function, using current +dataset (one which was passed to MLPSetDataset() or MLPSetSparseDataset()) +and current training settings. Training from NRestarts random starting +positions is performed, best network is chosen. - -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpcreatec2( - ae_int_t nin, - ae_int_t nhid1, - ae_int_t nhid2, - ae_int_t nout, - multilayerperceptron& network); +Training is performed using current training algorithm. -
    - -
    -
    /************************************************************************* -Creates neural network with NIn inputs, NOut outputs, without hidden -layers with non-linear output layer. Network weights are filled with small -random values. Activation function of the output layer takes values [A,B]. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpcreater0( - ae_int_t nin, - ae_int_t nout, - double a, - double b, - multilayerperceptron& network); +INPUT PARAMETERS: + S - trainer object + Network - neural network. It must have same number of inputs and + output/classes as was specified during creation of the + trainer object. + NRestarts - number of restarts, >=0: + * NRestarts>0 means that specified number of random + restarts are performed, best network is chosen after + training + * NRestarts=0 means that current state of the network + is used for training. -
    - -
    -
    /************************************************************************* -Same as MLPCreateR0, but with non-linear hidden layer. +OUTPUT PARAMETERS: + Network - trained network - -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpcreater1( - ae_int_t nin, - ae_int_t nhid, - ae_int_t nout, - double a, - double b, - multilayerperceptron& network); +NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(), + network is filled by zero values. Same behavior for functions + MLPStartTraining and MLPContinueTraining. -
    - -
    -
    /************************************************************************* -Same as MLPCreateR0, but with two non-linear hidden layers. +NOTE: this method uses sum-of-squares error function for training. -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey + Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpcreater2( - ae_int_t nin, - ae_int_t nhid1, - ae_int_t nhid2, - ae_int_t nout, - double a, - double b, - multilayerperceptron& network); +
    void alglib::mlptrainnetwork( + mlptrainer s, + multilayerperceptron network, + ae_int_t nrestarts, + mlpreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  [3]  [4]  [5]  [6]  

    +
    -
    /************************************************************************* -Error of the neural network on dataset. - - -FOR USERS OF COMMERCIAL EDITION: +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "dataanalysis.h" - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x, depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +using namespace alglib; -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. +int main(int argc, char **argv) +{ + // + // Suppose that we want to classify numbers as positive (class 0) and negative + // (class 1). We have training set which includes several strictly positive + // or negative numbers - and zero. + // + // The problem is that we are not sure how to classify zero, so from time to + // time we mark it as positive or negative (with equal probability). Other + // numbers are marked in pure deterministic setting. How will neural network + // cope with such classification task? + // + // NOTE: we use network with excessive amount of neurons, which guarantees + // almost exact reproduction of the training set. Generalization ability + // of such network is rather low, but we are not concerned with such + // questions in this basic demo. + // + mlptrainer trn; + multilayerperceptron network; + mlpreport rep; + real_1d_array x = "[0]"; + real_1d_array y = "[0,0]"; -RESULT: - sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) + // + // Training set. One row corresponds to one record [A => class(A)]. + // + // Classes are denoted by numbers from 0 to 1, where 0 corresponds to positive + // numbers and 1 to negative numbers. + // + // [ +1 0] + // [ +2 0] + // [ -1 1] + // [ -2 1] + // [ 0 0] !! sometimes we classify 0 as positive, sometimes as negative + // [ 0 1] !! + // + real_2d_array xy = "[[+1,0],[+2,0],[-1,1],[-2,1],[0,0],[0,1]]"; -DATASET FORMAT: + // + // + // When we solve classification problems, everything is slightly different from + // the regression ones: + // + // 1. Network is created. Because we solve classification problem, we use + // mlpcreatec1() function instead of mlpcreate1(). This function creates + // classifier network with SOFTMAX-normalized outputs. This network returns + // vector of class membership probabilities which are normalized to be + // non-negative and sum to 1.0 + // + // 2. We use mlpcreatetrainercls() function instead of mlpcreatetrainer() to + // create trainer object. 
Trainer object process dataset and neural network + // slightly differently to account for specifics of the classification + // problems. + // + // 3. Dataset is attached to trainer object. Note that dataset format is slightly + // different from one used for regression. + // + mlpcreatetrainercls(1, 2, trn); + mlpcreatec1(1, 5, 2, network); + mlpsetdataset(trn, xy, 6); -This function uses two different dataset formats - one for regression -networks, another one for classification networks. + // + // Network is trained with 5 restarts from random positions + // + mlptrainnetwork(trn, network, 5, rep); -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs + // + // Test our neural network on strictly positive and strictly negative numbers. + // + // IMPORTANT! Classifier network returns class membership probabilities instead + // of class indexes. Network returns two values (probabilities) instead of one + // (class index). + // + // Thus, for +1 we expect to get [P0,P1] = [1,0], where P0 is probability that + // number is positive (belongs to class 0), and P1 is probability that number + // is negative (belongs to class 1). + // + // For -1 we expect to get [P0,P1] = [0,1] + // + // Following properties are guaranteed by network architecture: + // * P0>=0, P1>=0 non-negativity + // * P0+P1=1 normalization + // + x = "[1]"; + mlpprocess(network, x, y); + printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [1.000,0.000] + x = "[-1]"; + mlpprocess(network, x, y); + printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [0.000,1.000] -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). + // + // But what our network will return for 0, which is between classes 0 and 1? + // + // In our dataset it has two different marks assigned (class 0 AND class 1). + // So network will return something average between class 0 and class 1: + // 0 => [0.5, 0.5] + // + x = "[0]"; + mlpprocess(network, x, y); + printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [0.500,0.500] + return 0; +} - -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::mlperror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); -double alglib::smp_mlperror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); -
    - +
    -
    /************************************************************************* -Natural error function for neural network, internal subroutine. - -NOTE: this function is single-threaded. Unlike other error function, it -receives no speed-up from being executed in SMP mode. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "dataanalysis.h" - -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::mlperrorn( - multilayerperceptron network, - real_2d_array xy, - ae_int_t ssize); +using namespace alglib; -
    - -
    -
    /************************************************************************* -Error of the neural network on dataset given by sparse matrix. +int main(int argc, char **argv) +{ + // + // Suppose that we want to classify numbers as positive (class 0) and negative + // (class 1). We also have one more class for zero (class 2). + // + // NOTE: we use network with excessive amount of neurons, which guarantees + // almost exact reproduction of the training set. Generalization ability + // of such network is rather low, but we are not concerned with such + // questions in this basic demo. + // + mlptrainer trn; + multilayerperceptron network; + mlpreport rep; + real_1d_array x = "[0]"; + real_1d_array y = "[0,0,0]"; -FOR USERS OF COMMERCIAL EDITION: + // + // Training set. One row corresponds to one record [A => class(A)]. + // + // Classes are denoted by numbers from 0 to 2, where 0 corresponds to positive + // numbers, 1 to negative numbers, 2 to zero + // + // [ +1 0] + // [ +2 0] + // [ -1 1] + // [ -2 1] + // [ 0 2] + // + real_2d_array xy = "[[+1,0],[+2,0],[-1,1],[-2,1],[0,2]]"; - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x, depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. + // + // + // When we solve classification problems, everything is slightly different from + // the regression ones: + // + // 1. Network is created. Because we solve classification problem, we use + // mlpcreatec1() function instead of mlpcreate1(). This function creates + // classifier network with SOFTMAX-normalized outputs. This network returns + // vector of class membership probabilities which are normalized to be + // non-negative and sum to 1.0 + // + // 2. We use mlpcreatetrainercls() function instead of mlpcreatetrainer() to + // create trainer object. Trainer object process dataset and neural network + // slightly differently to account for specifics of the classification + // problems. + // + // 3. Dataset is attached to trainer object. Note that dataset format is slightly + // different from one used for regression. + // + mlpcreatetrainercls(1, 3, trn); + mlpcreatec1(1, 5, 3, network); + mlpsetdataset(trn, xy, 5); + // + // Network is trained with 5 restarts from random positions + // + mlptrainnetwork(trn, network, 5, rep); -INPUT PARAMETERS: - Network - neural network - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. 
- NPoints - points count, >=0 + // + // Test our neural network on strictly positive and strictly negative numbers. + // + // IMPORTANT! Classifier network returns class membership probabilities instead + // of class indexes. Network returns three values (probabilities) instead of one + // (class index). + // + // Thus, for +1 we expect to get [P0,P1,P2] = [1,0,0], + // for -1 we expect to get [P0,P1,P2] = [0,1,0], + // and for 0 we will get [P0,P1,P2] = [0,0,1]. + // + // Following properties are guaranteed by network architecture: + // * P0>=0, P1>=0, P2>=0 non-negativity + // * P0+P1+P2=1 normalization + // + x = "[1]"; + mlpprocess(network, x, y); + printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [1.000,0.000,0.000] + x = "[-1]"; + mlpprocess(network, x, y); + printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [0.000,1.000,0.000] + x = "[0]"; + mlpprocess(network, x, y); + printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [0.000,0.000,1.000] + return 0; +} -RESULT: - sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) -DATASET FORMAT: +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "dataanalysis.h"
     
    -This  function  uses  two  different  dataset formats - one for regression
    -networks, another one for classification networks.
    +using namespace alglib;
     
    -For regression networks with NIn inputs and NOut outputs following dataset
    -format is used:
    -* dataset is given by NPoints*(NIn+NOut) matrix
    -* each row corresponds to one example
    -* first NIn columns are inputs, next NOut columns are outputs
     
    -For classification networks with NIn inputs and NClasses clases  following
    -dataset format is used:
    -* dataset is given by NPoints*(NIn+1) matrix
    -* each row corresponds to one example
    -* first NIn columns are inputs, last column stores class number (from 0 to
    -  NClasses-1).
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example shows how to perform cross-validation with ALGLIB
    +    //
    +    mlptrainer trn;
    +    multilayerperceptron network;
    +    mlpreport rep;
     
    -  -- ALGLIB --
    -     Copyright 23.07.2012 by Bochkanov Sergey
    -*************************************************************************/
    -
    double alglib::mlperrorsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); -double alglib::smp_mlperrorsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); + // + // Training set: f(x)=1/(x^2+1) + // One row corresponds to one record [x,f(x)] + // + real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[-1.3,0.4],[-1,0.5],[-0.6,0.7],[-0.3,0.9],[0,1],[2.0,0.2],[1.6,0.3],[1.3,0.4],[1,0.5],[0.6,0.7],[0.3,0.9]]"; -
    - -
    -
    /************************************************************************* -Error of the neural network on subset of sparse dataset. + // + // Trainer object is created. + // Dataset is attached to trainer object. + // + // NOTE: it is not good idea to perform cross-validation on sample + // as small as ours (13 examples). It is done for demonstration + // purposes only. Generalization error estimates won't be + // precise enough for practical purposes. + // + mlpcreatetrainer(1, 1, trn); + mlpsetdataset(trn, xy, 13); + // + // The key property of the cross-validation is that it estimates + // generalization properties of neural ARCHITECTURE. It does NOT + // estimates generalization error of some specific network which + // is passed to the k-fold CV routine. + // + // In our example we create 1x4x1 neural network and pass it to + // CV routine without training it. Original state of the network + // is not used for cross-validation - each round is restarted from + // random initial state. Only geometry of network matters. + // + // We perform 5 restarts from different random positions for each + // of the 10 cross-validation rounds. + // + mlpcreate1(1, 4, 1, network); + mlpkfoldcv(trn, network, 5, 10, rep); -FOR USERS OF COMMERCIAL EDITION: + // + // Cross-validation routine stores estimates of the generalization + // error to MLP report structure. You may examine its fields and + // see estimates of different errors (RMS, CE, Avg). + // + // Because cross-validation is non-deterministic, in our manual we + // can not say what values will be stored to rep after call to + // mlpkfoldcv(). Every CV round will return slightly different + // estimates. + // + return 0; +} - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "dataanalysis.h"
     
    -INPUT PARAMETERS:
    -    Network   -     neural network;
    -    XY        -     training  set,  see  below  for  information  on   the
    -                    training set format. This function checks  correctness
    -                    of  the  dataset  (no  NANs/INFs,  class  numbers  are
    -                    correct) and throws exception when  incorrect  dataset
    -                    is passed.  Sparse  matrix  must  use  CRS  format for
    -                    storage.
    -    SetSize   -     real size of XY, SetSize>=0;
    -                    it is used when SubsetSize<0;
    -    Subset    -     subset of SubsetSize elements, array[SubsetSize];
    -    SubsetSize-     number of elements in Subset[] array:
    -                    * if SubsetSize>0, rows of XY with indices Subset[0]...
    -                      ...Subset[SubsetSize-1] are processed
    -                    * if SubsetSize=0, zeros are returned
    -                    * if SubsetSize<0, entire dataset is  processed;  Subset[]
    -                      array is ignored in this case.
    +using namespace alglib;
     
    -RESULT:
    -    sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2)
     
    -DATASET FORMAT:
    +int main(int argc, char **argv)
    +{
    +    //
+    // This example shows how to train early stopping ensembles.
    +    //
    +    mlptrainer trn;
    +    mlpensemble ensemble;
    +    mlpreport rep;
     
    -This  function  uses  two  different  dataset formats - one for regression
    -networks, another one for classification networks.
    +    //
    +    // Training set: f(x)=1/(x^2+1)
    +    // One row corresponds to one record [x,f(x)]
    +    //
    +    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[-1.3,0.4],[-1,0.5],[-0.6,0.7],[-0.3,0.9],[0,1],[2.0,0.2],[1.6,0.3],[1.3,0.4],[1,0.5],[0.6,0.7],[0.3,0.9]]";
     
    -For regression networks with NIn inputs and NOut outputs following dataset
    -format is used:
    -* dataset is given by NPoints*(NIn+NOut) matrix
    -* each row corresponds to one example
    -* first NIn columns are inputs, next NOut columns are outputs
    +    //
    +    // Trainer object is created.
    +    // Dataset is attached to trainer object.
    +    //
+    // NOTE: it is not a good idea to use an early stopping ensemble on a sample
    +    //       as small as ours (13 examples). It is done for demonstration
+    //       purposes only. The ensemble training algorithm won't find a good
+    //       solution on such a small sample.
    +    //
    +    mlpcreatetrainer(1, 1, trn);
    +    mlpsetdataset(trn, xy, 13);
     
    -For classification networks with NIn inputs and NClasses clases  following
    -dataset format is used:
    -* dataset is given by NPoints*(NIn+1) matrix
    -* each row corresponds to one example
    -* first NIn columns are inputs, last column stores class number (from 0 to
    -  NClasses-1).
    +    //
+    // Ensemble is created and trained. Each of the 50 networks is trained
    +    // with 5 restarts.
    +    //
    +    mlpecreate1(1, 4, 1, 50, ensemble);
    +    mlptrainensemblees(trn, ensemble, 5, rep);
    +    return 0;
    +}
     
    -  -- ALGLIB --
    -     Copyright 04.09.2012 by Bochkanov Sergey
    -*************************************************************************/
    -
    double alglib::mlperrorsparsesubset( - multilayerperceptron network, - sparsematrix xy, - ae_int_t setsize, - integer_1d_array subset, - ae_int_t subsetsize); -double alglib::smp_mlperrorsparsesubset( - multilayerperceptron network, - sparsematrix xy, - ae_int_t setsize, - integer_1d_array subset, - ae_int_t subsetsize); -
    - +
    -
    /************************************************************************* -Error of the neural network on subset of dataset. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "dataanalysis.h" +using namespace alglib; -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - SetSize - real size of XY, SetSize>=0; - Subset - subset of SubsetSize elements, array[SubsetSize]; - SubsetSize- number of elements in Subset[] array: - * if SubsetSize>0, rows of XY with indices Subset[0]... - ...Subset[SubsetSize-1] are processed - * if SubsetSize=0, zeros are returned - * if SubsetSize<0, entire dataset is processed; Subset[] - array is ignored in this case. -RESULT: - sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) +int main(int argc, char **argv) +{ + // + // This example shows how to use parallel functionality of ALGLIB. + // We generate simple 1-dimensional regression problem and show how + // to use parallel training, parallel cross-validation, parallel + // training of neural ensembles. + // + // We assume that you already know how to use ALGLIB in serial mode + // and concentrate on its parallel capabilities. + // + // NOTE: it is not good idea to use parallel features on sample as small + // as ours (13 examples). It is done only for demonstration purposes. + // + mlptrainer trn; + multilayerperceptron network; + mlpensemble ensemble; + mlpreport rep; + real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[-1.3,0.4],[-1,0.5],[-0.6,0.7],[-0.3,0.9],[0,1],[2.0,0.2],[1.6,0.3],[1.3,0.4],[1,0.5],[0.6,0.7],[0.3,0.9]]"; + mlpcreatetrainer(1, 1, trn); + mlpsetdataset(trn, xy, 13); + mlpcreate1(1, 4, 1, network); + mlpecreate1(1, 4, 1, 50, ensemble); -DATASET FORMAT: + // + // Below we demonstrate how to perform: + // * parallel training of individual networks + // * parallel cross-validation + // * parallel training of neural ensembles + // + // In order to use multithreading, you have to: + // 1) Install SMP edition of ALGLIB. + // 2) This step is specific for C++ users: you should activate OS-specific + // capabilities of ALGLIB by defining AE_OS=AE_POSIX (for *nix systems) + // or AE_OS=AE_WINDOWS (for Windows systems). + // C# users do not have to perform this step because C# programs are + // portable across different systems without OS-specific tuning. 
+ // 3) Tell ALGLIB that you want it to use multithreading by means of + // setnworkers() call: + // * alglib::setnworkers(0) = use all cores + // * alglib::setnworkers(-1) = leave one core unused + // * alglib::setnworkers(-2) = leave two cores unused + // * alglib::setnworkers(+2) = use 2 cores (even if you have more) + // During runtime ALGLIB will automatically determine whether it is + // feasible to start worker threads and split your task between cores. + // + alglib::setnworkers(+2); -This function uses two different dataset formats - one for regression -networks, another one for classification networks. + // + // First, we perform parallel training of individual network with 5 + // restarts from random positions. These 5 rounds of training are + // executed in parallel manner, with best network chosen after + // training. + // + // ALGLIB can use additional way to speed up computations - divide + // dataset into smaller subsets and process these subsets + // simultaneously. It allows us to efficiently parallelize even + // single training round. This operation is performed automatically + // for large datasets, but our toy dataset is too small. + // + mlptrainnetwork(trn, network, 5, rep); -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs + // + // Then, we perform parallel 10-fold cross-validation, with 5 random + // restarts per each CV round. I.e., 5*10=50 networks are trained + // in total. All these operations can be parallelized. + // + // NOTE: again, ALGLIB can parallelize calculation of gradient + // over entire dataset - but our dataset is too small. + // + mlpkfoldcv(trn, network, 5, 10, rep); -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). + // + // Finally, we train early stopping ensemble of 50 neural networks, + // each of them is trained with 5 random restarts. I.e., 5*50=250 + // networks aretrained in total. + // + mlptrainensemblees(trn, ensemble, 5, rep); + return 0; +} - -- ALGLIB -- - Copyright 04.09.2012 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::mlperrorsubset( - multilayerperceptron network, - real_2d_array xy, - ae_int_t setsize, - integer_1d_array subset, - ae_int_t subsetsize); -double alglib::smp_mlperrorsubset( - multilayerperceptron network, - real_2d_array xy, - ae_int_t setsize, - integer_1d_array subset, - ae_int_t subsetsize); -
    - +
    -
    /************************************************************************* -This function returns offset/scaling coefficients for I-th input of the -network. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "dataanalysis.h" -INPUT PARAMETERS: - Network - network - I - input index +using namespace alglib; -OUTPUT PARAMETERS: - Mean - mean term - Sigma - sigma term, guaranteed to be nonzero. -I-th input is passed through linear transformation - IN[i] = (IN[i]-Mean)/Sigma -before feeding to the network +int main(int argc, char **argv) +{ + // + // The very simple example on neural network: network is trained to reproduce + // small 2x2 multiplication table. + // + // NOTE: we use network with excessive amount of neurons, which guarantees + // almost exact reproduction of the training set. Generalization ability + // of such network is rather low, but we are not concerned with such + // questions in this basic demo. + // + mlptrainer trn; + multilayerperceptron network; + mlpreport rep; - -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpgetinputscaling( - multilayerperceptron network, - ae_int_t i, - double& mean, - double& sigma); + // + // Training set: + // * one row corresponds to one record A*B=C in the multiplication table + // * first two columns store A and B, last column stores C + // + // [1 * 1 = 1] + // [1 * 2 = 2] + // [2 * 1 = 2] + // [2 * 2 = 4] + // + real_2d_array xy = "[[1,1,1],[1,2,2],[2,1,2],[2,2,4]]"; -
    - -
    -
    /************************************************************************* -Returns number of inputs. + // + // Network is created. + // Trainer object is created. + // Dataset is attached to trainer object. + // + mlpcreatetrainer(2, 1, trn); + mlpcreate1(2, 5, 1, network); + mlpsetdataset(trn, xy, 4); - -- ALGLIB -- - Copyright 19.10.2011 by Bochkanov Sergey -*************************************************************************/ -
    ae_int_t alglib::mlpgetinputscount(multilayerperceptron network); + // + // Network is trained with 5 restarts from random positions + // + mlptrainnetwork(trn, network, 5, rep); -
    - -
    -
    /************************************************************************* -This function returns total number of layers (including input, hidden and -output layers). + // + // 2*2=? + // + real_1d_array x = "[2,2]"; + real_1d_array y = "[0]"; + mlpprocess(network, x, y); + printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [4.000] + return 0; +} - -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey -*************************************************************************/ -
    ae_int_t alglib::mlpgetlayerscount(multilayerperceptron network); -
    - +
    -
    /************************************************************************* -This function returns size of K-th layer. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "dataanalysis.h" -K=0 corresponds to input layer, K=CNT-1 corresponds to output layer. +using namespace alglib; -Size of the output layer is always equal to the number of outputs, although -when we have softmax-normalized network, last neuron doesn't have any -connections - it is just zero. - -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey -*************************************************************************/ -
    ae_int_t alglib::mlpgetlayersize( - multilayerperceptron network, - ae_int_t k); +int main(int argc, char **argv) +{ + // + // Network with 2 inputs and 2 outputs is trained to reproduce vector function: + // (x0,x1) => (x0+x1, x0*x1) + // + // Informally speaking, we want neural network to simultaneously calculate + // both sum of two numbers and their product. + // + // NOTE: we use network with excessive amount of neurons, which guarantees + // almost exact reproduction of the training set. Generalization ability + // of such network is rather low, but we are not concerned with such + // questions in this basic demo. + // + mlptrainer trn; + multilayerperceptron network; + mlpreport rep; -
    - -
    -
    /************************************************************************* -This function returns information about Ith neuron of Kth layer + // + // Training set. One row corresponds to one record [A,B,A+B,A*B]. + // + // [ 1 1 1+1 1*1 ] + // [ 1 2 1+2 1*2 ] + // [ 2 1 2+1 2*1 ] + // [ 2 2 2+2 2*2 ] + // + real_2d_array xy = "[[1,1,2,1],[1,2,3,2],[2,1,3,2],[2,2,4,4]]"; -INPUT PARAMETERS: - Network - network - K - layer index - I - neuron index (within layer) + // + // Network is created. + // Trainer object is created. + // Dataset is attached to trainer object. + // + mlpcreatetrainer(2, 2, trn); + mlpcreate1(2, 5, 2, network); + mlpsetdataset(trn, xy, 4); -OUTPUT PARAMETERS: - FKind - activation function type (used by MLPActivationFunction()) - this value is zero for input or linear neurons - Threshold - also called offset, bias - zero for input neurons + // + // Network is trained with 5 restarts from random positions + // + mlptrainnetwork(trn, network, 5, rep); -NOTE: this function throws exception if layer or neuron with given index -do not exists. + // + // 2+1=? + // 2*1=? + // + real_1d_array x = "[2,1]"; + real_1d_array y = "[0,0]"; + mlpprocess(network, x, y); + printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [3.000,2.000] + return 0; +} - -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpgetneuroninfo( - multilayerperceptron network, - ae_int_t k, - ae_int_t i, - ae_int_t& fkind, - double& threshold); -
    - +
    -
    /************************************************************************* -This function returns offset/scaling coefficients for I-th output of the -network. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "dataanalysis.h" -INPUT PARAMETERS: - Network - network - I - input index +using namespace alglib; -OUTPUT PARAMETERS: - Mean - mean term - Sigma - sigma term, guaranteed to be nonzero. -I-th output is passed through linear transformation - OUT[i] = OUT[i]*Sigma+Mean -before returning it to user. In case we have SOFTMAX-normalized network, -we return (Mean,Sigma)=(0.0,1.0). +int main(int argc, char **argv) +{ + // + // Trainer object is used to train network. It stores dataset, training settings, + // and other information which is NOT part of neural network. You should use + // trainer object as follows: + // (1) you create trainer object and specify task type (classification/regression) + // and number of inputs/outputs + // (2) you add dataset to the trainer object + // (3) you may change training settings (stopping criteria or weight decay) + // (4) finally, you may train one or more networks + // + // You may interleave stages 2...4 and repeat them many times. Trainer object + // remembers its internal state and can be used several times after its creation + // and initialization. + // + mlptrainer trn; - -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpgetoutputscaling( - multilayerperceptron network, - ae_int_t i, - double& mean, - double& sigma); + // + // Stage 1: object creation. + // + // We have to specify number of inputs and outputs. Trainer object can be used + // only for problems with same number of inputs/outputs as was specified during + // its creation. + // + // In case you want to train SOFTMAX-normalized network which solves classification + // problems, you must use another function to create trainer object: + // mlpcreatetrainercls(). + // + // Below we create trainer object which can be used to train regression networks + // with 2 inputs and 1 output. + // + mlpcreatetrainer(2, 1, trn); -
    - -
    -
    /************************************************************************* -Returns number of outputs. + // + // Stage 2: specification of the training set + // + // By default trainer object stores empty dataset. So to solve your non-empty problem + // you have to set dataset by passing to trainer dense or sparse matrix. + // + // One row of the matrix corresponds to one record A*B=C in the multiplication table. + // First two columns store A and B, last column stores C + // + // [1 * 1 = 1] [ 1 1 1 ] + // [1 * 2 = 2] [ 1 2 2 ] + // [2 * 1 = 2] = [ 2 1 2 ] + // [2 * 2 = 4] [ 2 2 4 ] + // + real_2d_array xy = "[[1,1,1],[1,2,2],[2,1,2],[2,2,4]]"; + mlpsetdataset(trn, xy, 4); - -- ALGLIB -- - Copyright 19.10.2011 by Bochkanov Sergey -*************************************************************************/ -
    ae_int_t alglib::mlpgetoutputscount(multilayerperceptron network); + // + // Stage 3: modification of the training parameters. + // + // You may modify parameters like weights decay or stopping criteria: + // * we set moderate weight decay + // * we choose iterations limit as stopping condition (another condition - step size - + // is zero, which means than this condition is not active) + // + double wstep = 0.000; + ae_int_t maxits = 100; + mlpsetdecay(trn, 0.01); + mlpsetcond(trn, wstep, maxits); -
    - -
    -
    /************************************************************************* -This function returns information about connection from I0-th neuron of -K0-th layer to I1-th neuron of K1-th layer. + // + // Stage 4: training. + // + // We will train several networks with different architecture using same trainer object. + // We may change training parameters or even dataset, so different networks are trained + // differently. But in this simple example we will train all networks with same settings. + // + // We create and train three networks: + // * network 1 has 2x1 architecture (2 inputs, no hidden neurons, 1 output) + // * network 2 has 2x5x1 architecture (2 inputs, 5 hidden neurons, 1 output) + // * network 3 has 2x5x5x1 architecture (2 inputs, two hidden layers, 1 output) + // + // NOTE: these networks solve regression problems. For classification problems you + // should use mlpcreatec0/c1/c2 to create neural networks which have SOFTMAX- + // normalized outputs. + // + multilayerperceptron net1; + multilayerperceptron net2; + multilayerperceptron net3; + mlpreport rep; -INPUT PARAMETERS: - Network - network - K0 - layer index - I0 - neuron index (within layer) - K1 - layer index - I1 - neuron index (within layer) + mlpcreate0(2, 1, net1); + mlpcreate1(2, 5, 1, net2); + mlpcreate2(2, 5, 5, 1, net3); -RESULT: - connection weight (zero for non-existent connections) + mlptrainnetwork(trn, net1, 5, rep); + mlptrainnetwork(trn, net2, 5, rep); + mlptrainnetwork(trn, net3, 5, rep); + return 0; +} -This function: -1. throws exception if layer or neuron with given index do not exists. -2. returns zero if neurons exist, but there is no connection between them - -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey +
    + + +
    +
    /************************************************************************* +KD-tree object. *************************************************************************/ -
    double alglib::mlpgetweight( - multilayerperceptron network, - ae_int_t k0, - ae_int_t i0, - ae_int_t k1, - ae_int_t i1); +
    class kdtree +{ +};
    - +
     
    /************************************************************************* -Returns number of weights. +Buffer object which is used to perform nearest neighbor requests in the +multithreaded mode (multiple threads working with same KD-tree object). - -- ALGLIB -- - Copyright 19.10.2011 by Bochkanov Sergey +This object should be created with KDTreeCreateRequestBuffer(). *************************************************************************/ -
    ae_int_t alglib::mlpgetweightscount(multilayerperceptron network); +
    class kdtreerequestbuffer +{ +};
    - +
     
    /************************************************************************* -Gradient calculation +KD-tree creation -INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - X - input vector, length of array must be at least NIn - DesiredY- desired outputs, length of array must be at least NOut - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. +This subroutine creates KD-tree from set of X-values and optional Y-values -OUTPUT PARAMETERS: - E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) - Grad - gradient of E with respect to weights of network, array[WCount] +INPUT PARAMETERS + XY - dataset, array[0..N-1,0..NX+NY-1]. + one row corresponds to one point. + first NX columns contain X-values, next NY (NY may be zero) + columns may contain associated Y-values + N - number of points, N>=0. + NX - space dimension, NX>=1. + NY - number of optional Y-values, NY>=0. + NormType- norm type: + * 0 denotes infinity-norm + * 1 denotes 1-norm + * 2 denotes 2-norm (Euclidean norm) + +OUTPUT PARAMETERS + KDT - KD-tree + + +NOTES + +1. KD-tree creation have O(N*logN) complexity and O(N*(2*NX+NY)) memory + requirements. +2. Although KD-trees may be used with any combination of N and NX, they + are more efficient than brute-force search only when N >> 4^NX. So they + are most useful in low-dimensional tasks (NX=2, NX=3). NX=1 is another + inefficient case, because simple binary search (without additional + structures) is much more efficient in such tasks than KD-trees. -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpgrad( - multilayerperceptron network, - real_1d_array x, - real_1d_array desiredy, - double& e, - real_1d_array& grad); +
    void alglib::kdtreebuild( + real_2d_array xy, + ae_int_t nx, + ae_int_t ny, + ae_int_t normtype, + kdtree& kdt, + const xparams _params = alglib::xdefault); +void alglib::kdtreebuild( + real_2d_array xy, + ae_int_t n, + ae_int_t nx, + ae_int_t ny, + ae_int_t normtype, + kdtree& kdt, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  
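A minimal usage sketch (an editorial illustration, not part of the upstream example set): it builds a 2-dimensional tree with the Euclidean norm and answers a single nearest-neighbor query. The point coordinates are made up, and the nearestneighbor subpackage is assumed to be reachable through alglibmisc.h, as in the stock kd-tree examples.

#include "stdafx.h"
#include <stdio.h>
#include "alglibmisc.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Five 2D points, no attached Y-values (NY=0), Euclidean norm (NormType=2)
    //
    real_2d_array points = "[[0,0],[0,1],[1,0],[1,1],[2,2]]";
    kdtree kdt;
    kdtreebuild(points, 2, 0, 2, kdt);

    //
    // Nearest neighbor of (0.9,0.9); the result is kept in the internal
    // buffer of the tree and read back with kdtreequeryresultsx()
    //
    real_1d_array x = "[0.9,0.9]";
    kdtreequeryknn(kdt, x, 1);
    real_2d_array nn = "[[]]";
    kdtreequeryresultsx(kdt, nn);
    printf("%s\n", nn.tostring(1).c_str()); // expected: approximately [[1.000,1.000]]
    return 0;
}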

    +
     
    /************************************************************************* -Batch gradient calculation for a set of inputs/outputs - +KD-tree creation -FOR USERS OF COMMERCIAL EDITION: +This subroutine creates KD-tree from set of X-values, integer tags and +optional Y-values - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +INPUT PARAMETERS + XY - dataset, array[0..N-1,0..NX+NY-1]. + one row corresponds to one point. + first NX columns contain X-values, next NY (NY may be zero) + columns may contain associated Y-values + Tags - tags, array[0..N-1], contains integer tags associated + with points. + N - number of points, N>=0 + NX - space dimension, NX>=1. + NY - number of optional Y-values, NY>=0. + NormType- norm type: + * 0 denotes infinity-norm + * 1 denotes 1-norm + * 2 denotes 2-norm (Euclidean norm) +OUTPUT PARAMETERS + KDT - KD-tree -INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset in dense format; one sample = one row: - * first NIn columns contain inputs, - * for regression problem, next NOut columns store - desired outputs. - * for classification problem, next column (just one!) - stores class number. - SSize - number of elements in XY - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. +NOTES -OUTPUT PARAMETERS: - E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) - Grad - gradient of E with respect to weights of network, array[WCount] +1. KD-tree creation have O(N*logN) complexity and O(N*(2*NX+NY)) memory + requirements. +2. Although KD-trees may be used with any combination of N and NX, they + are more efficient than brute-force search only when N >> 4^NX. So they + are most useful in low-dimensional tasks (NX=2, NX=3). NX=1 is another + inefficient case, because simple binary search (without additional + structures) is much more efficient in such tasks than KD-trees. -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpgradbatch( - multilayerperceptron network, +
    void alglib::kdtreebuildtagged( real_2d_array xy, - ae_int_t ssize, - double& e, - real_1d_array& grad); -void alglib::smp_mlpgradbatch( - multilayerperceptron network, + integer_1d_array tags, + ae_int_t nx, + ae_int_t ny, + ae_int_t normtype, + kdtree& kdt, + const xparams _params = alglib::xdefault); +void alglib::kdtreebuildtagged( real_2d_array xy, - ae_int_t ssize, - double& e, - real_1d_array& grad); + integer_1d_array tags, + ae_int_t n, + ae_int_t nx, + ae_int_t ny, + ae_int_t normtype, + kdtree& kdt, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
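A companion sketch for the tagged variant (editorial, with made-up IDs and the same alglibmisc.h assumption): integer tags attached at construction time are recovered after a query with kdtreequeryresultstags(), which is convenient for mapping results back to application-level records.

#include "stdafx.h"
#include <stdio.h>
#include "alglibmisc.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Four 2D points; each point carries a made-up application-level ID as its tag
    //
    real_2d_array points = "[[0,0],[0,1],[1,0],[1,1]]";
    integer_1d_array tags = "[100,101,102,103]";
    kdtree kdt;
    kdtreebuildtagged(points, tags, 2, 0, 2, kdt);

    //
    // Two nearest neighbors of (0.1,0.1); tags identify which records matched
    //
    real_1d_array x = "[0.1,0.1]";
    kdtreequeryknn(kdt, x, 2);
    integer_1d_array foundtags;
    kdtreequeryresultstags(kdt, foundtags);
    for (ae_int_t i = 0; i < foundtags.length(); i++)
        printf("%d\n", (int)foundtags[i]); // expected: 100 first, then 101 or 102
    return 0;
}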

    +
     
    /************************************************************************* -Batch gradient calculation for a set of inputs/outputs given by sparse -matrices +This function creates buffer structure which can be used to perform +parallel KD-tree requests. +KD-tree subpackage provides two sets of request functions - ones which use +internal buffer of KD-tree object (these functions are single-threaded +because they use same buffer, which can not shared between threads), and +ones which use external buffer. -FOR USERS OF COMMERCIAL EDITION: +This function is used to initialize external buffer. - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +INPUT PARAMETERS + KDT - KD-tree which is associated with newly created buffer +OUTPUT PARAMETERS + Buf - external buffer. -INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset in sparse format; one sample = one row: - * MATRIX MUST BE STORED IN CRS FORMAT - * first NIn columns contain inputs. - * for regression problem, next NOut columns store - desired outputs. - * for classification problem, next column (just one!) - stores class number. - SSize - number of elements in XY - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. -OUTPUT PARAMETERS: - E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) - Grad - gradient of E with respect to weights of network, array[WCount] +IMPORTANT: KD-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use buffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -- ALGLIB -- - Copyright 26.07.2012 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpgradbatchsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t ssize, - double& e, - real_1d_array& grad); -void alglib::smp_mlpgradbatchsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t ssize, - double& e, - real_1d_array& grad); +
    void alglib::kdtreecreaterequestbuffer( + kdtree kdt, + kdtreerequestbuffer& buf, + const xparams _params = alglib::xdefault);
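A sketch of the buffered workflow (editorial illustration). kdtreecreaterequestbuffer() is documented above; the thread-safe calls kdtreetsqueryknn() and kdtreetsqueryresultsx() are only referenced by name in this manual, and the sketch assumes they accept the request buffer as the second argument - consult their own reference entries for the exact signatures.

#include "stdafx.h"
#include <stdio.h>
#include "alglibmisc.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array points = "[[0,0],[0,1],[1,0],[1,1]]";
    kdtree kdt;
    kdtreebuild(points, 2, 0, 2, kdt);

    //
    // One request buffer per thread; here a single buffer is used serially.
    // A buffer must only be used with the tree that initialized it.
    //
    kdtreerequestbuffer buf;
    kdtreecreaterequestbuffer(kdt, buf);

    //
    // Buffered ("Ts") query: results go to the buffer rather than to the
    // tree, so several threads may query the same tree concurrently.
    // NOTE: parameter order of the Ts-functions is assumed here.
    //
    real_1d_array x = "[0.9,0.9]";
    kdtreetsqueryknn(kdt, buf, x, 1);
    real_2d_array nn = "[[]]";
    kdtreetsqueryresultsx(kdt, buf, nn);
    printf("%s\n", nn.tostring(1).c_str()); // expected: approximately [[1.000,1.000]]
    return 0;
}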
    - +
     
    /************************************************************************* -Batch gradient calculation for a set of inputs/outputs for a subset of -dataset given by set of indexes. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +K-NN query: approximate K nearest neighbors +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryAKNN() ("Ts" stands for "thread-safe"). -INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset in sparse format; one sample = one row: - * MATRIX MUST BE STORED IN CRS FORMAT - * first NIn columns contain inputs, - * for regression problem, next NOut columns store - desired outputs. - * for classification problem, next column (just one!) - stores class number. - SetSize - real size of XY, SetSize>=0; - Idx - subset of SubsetSize elements, array[SubsetSize]: - * Idx[I] stores row index in the original dataset which is - given by XY. Gradient is calculated with respect to rows - whose indexes are stored in Idx[]. - * Idx[] must store correct indexes; this function throws - an exception in case incorrect index (less than 0 or - larger than rows(XY)) is given - * Idx[] may store indexes in any order and even with - repetitions. - SubsetSize- number of elements in Idx[] array: - * positive value means that subset given by Idx[] is processed - * zero value results in zero gradient - * negative value means that full dataset is processed - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. +INPUT PARAMETERS + KDT - KD-tree + X - point, array[0..NX-1]. + K - number of neighbors to return, K>=1 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + Eps - approximation factor, Eps>=0. eps-approximate nearest + neighbor is a neighbor whose distance from X is at + most (1+eps) times distance of true nearest neighbor. 
-OUTPUT PARAMETERS: - E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) - Grad - gradient of E with respect to weights of network, - array[WCount] +RESULT + number of actual neighbors found (either K or N, if K>N). -NOTE: when SubsetSize<0 is used full dataset by call MLPGradBatchSparse - function. +NOTES + significant performance gain may be achieved only when Eps is on + the order of magnitude of 1 or larger. + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +these results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances -- ALGLIB -- - Copyright 26.07.2012 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpgradbatchsparsesubset( - multilayerperceptron network, - sparsematrix xy, - ae_int_t setsize, - integer_1d_array idx, - ae_int_t subsetsize, - double& e, - real_1d_array& grad); -void alglib::smp_mlpgradbatchsparsesubset( - multilayerperceptron network, - sparsematrix xy, - ae_int_t setsize, - integer_1d_array idx, - ae_int_t subsetsize, - double& e, - real_1d_array& grad); +
    ae_int_t alglib::kdtreequeryaknn( + kdtree kdt, + real_1d_array x, + ae_int_t k, + double eps, + const xparams _params = alglib::xdefault); +ae_int_t alglib::kdtreequeryaknn( + kdtree kdt, + real_1d_array x, + ae_int_t k, + bool selfmatch, + double eps, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
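An editorial sketch of an approximate query, using the signatures listed above (alglibmisc.h assumed): with Eps=2.0 every reported neighbor may be up to (1+Eps)=3 times farther away than the corresponding true neighbor, which is the price paid for a faster search on large trees.

#include "stdafx.h"
#include <stdio.h>
#include "alglibmisc.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array points = "[[0,0],[0,1],[1,0],[1,1],[2,2]]";
    kdtree kdt;
    kdtreebuild(points, 2, 0, 2, kdt);

    //
    // Approximate 2-NN query with Eps=2.0: each reported neighbor is at most
    // (1+Eps)=3 times farther away than the corresponding true neighbor.
    //
    real_1d_array x = "[0.5,0.5]";
    ae_int_t found = kdtreequeryaknn(kdt, x, 2, 2.0);
    real_1d_array dist = "[]";
    kdtreequeryresultsdistances(kdt, dist);
    printf("%d neighbors, first distance %.3f\n", (int)found, dist[0]);
    return 0;
}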

    +
     
    /************************************************************************* -Batch gradient calculation for a subset of dataset +Box query: all points within user-specified box. +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryBox() ("Ts" stands for "thread-safe"). -FOR USERS OF COMMERCIAL EDITION: +INPUT PARAMETERS + KDT - KD-tree + BoxMin - lower bounds, array[0..NX-1]. + BoxMax - upper bounds, array[0..NX-1]. - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +RESULT + number of actual neighbors found (in [0,N]). -INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset in dense format; one sample = one row: - * first NIn columns contain inputs, - * for regression problem, next NOut columns store - desired outputs. - * for classification problem, next column (just one!) - stores class number. - SetSize - real size of XY, SetSize>=0; - Idx - subset of SubsetSize elements, array[SubsetSize]: - * Idx[I] stores row index in the original dataset which is - given by XY. Gradient is calculated with respect to rows - whose indexes are stored in Idx[]. - * Idx[] must store correct indexes; this function throws - an exception in case incorrect index (less than 0 or - larger than rows(XY)) is given - * Idx[] may store indexes in any order and even with - repetitions. - SubsetSize- number of elements in Idx[] array: - * positive value means that subset given by Idx[] is processed - * zero value results in zero gradient - * negative value means that full dataset is processed - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. +This subroutine performs query and stores its result in the internal +structures of the KD-tree. 
You can use following subroutines to obtain +these results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() returns zeros for this request -OUTPUT PARAMETERS: - E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) - Grad - gradient of E with respect to weights of network, - array[WCount] +NOTE: this particular query returns unordered results, because there is no + meaningful way of ordering points. Furthermore, no 'distance' is + associated with points - it is either INSIDE or OUTSIDE (so request + for distances will return zeros). -- ALGLIB -- - Copyright 26.07.2012 by Bochkanov Sergey + Copyright 14.05.2016 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpgradbatchsubset( - multilayerperceptron network, - real_2d_array xy, - ae_int_t setsize, - integer_1d_array idx, - ae_int_t subsetsize, - double& e, - real_1d_array& grad); -void alglib::smp_mlpgradbatchsubset( - multilayerperceptron network, - real_2d_array xy, - ae_int_t setsize, - integer_1d_array idx, - ae_int_t subsetsize, - double& e, - real_1d_array& grad); +
    ae_int_t alglib::kdtreequerybox( + kdtree kdt, + real_1d_array boxmin, + real_1d_array boxmax, + const xparams _params = alglib::xdefault);
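A brief editorial sketch of a box request (alglibmisc.h assumed): the return value is the number of points falling inside the axis-aligned box, and the points themselves can be read back with the usual query-results functions.

#include "stdafx.h"
#include <stdio.h>
#include "alglibmisc.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array points = "[[0,0],[0,1],[1,0],[1,1],[2,2]]";
    kdtree kdt;
    kdtreebuild(points, 2, 0, 2, kdt);

    //
    // Every point inside the box [-0.5,1.5]x[-0.5,1.5]; only (2,2) is outside
    //
    real_1d_array boxmin = "[-0.5,-0.5]";
    real_1d_array boxmax = "[1.5,1.5]";
    ae_int_t count = kdtreequerybox(kdt, boxmin, boxmax);
    printf("%d points in box\n", (int)count); // expected: 4
    return 0;
}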
    - +
     
    /************************************************************************* -Gradient calculation (natural error function is used) +K-NN query: K nearest neighbors -INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - X - input vector, length of array must be at least NIn - DesiredY- desired outputs, length of array must be at least NOut - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryKNN() ("Ts" stands for "thread-safe"). -OUTPUT PARAMETERS: - E - error function, sum-of-squares for regression networks, - cross-entropy for classification networks. - Grad - gradient of E with respect to weights of network, array[WCount] +INPUT PARAMETERS + KDT - KD-tree + X - point, array[0..NX-1]. + K - number of neighbors to return, K>=1 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of actual neighbors found (either K or N, if K>N). + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +these results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpgradn( - multilayerperceptron network, +
    ae_int_t alglib::kdtreequeryknn( + kdtree kdt, real_1d_array x, - real_1d_array desiredy, - double& e, - real_1d_array& grad); + ae_int_t k, + const xparams _params = alglib::xdefault); +ae_int_t alglib::kdtreequeryknn( + kdtree kdt, + real_1d_array x, + ae_int_t k, + bool selfmatch, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
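Another editorial sketch (alglibmisc.h assumed): a query issued from a point that belongs to the dataset, with SelfMatch=false so the point cannot report itself, followed by retrieval of the distance with kdtreequeryresultsdistances().

#include "stdafx.h"
#include <stdio.h>
#include "alglibmisc.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array points = "[[0,0],[0,1],[1,0],[1,1],[2,2]]";
    kdtree kdt;
    kdtreebuild(points, 2, 0, 2, kdt);

    //
    // (1,1) belongs to the dataset; with SelfMatch=false it cannot be
    // returned as its own neighbor, so the closest reported point is
    // (0,1) or (1,0), both at distance 1.
    //
    real_1d_array x = "[1,1]";
    kdtreequeryknn(kdt, x, 1, false);
    real_1d_array dist = "[]";
    kdtreequeryresultsdistances(kdt, dist);
    printf("%.3f\n", dist[0]); // expected: 1.000
    return 0;
}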

    +
     
    /************************************************************************* -Batch gradient calculation for a set of inputs/outputs -(natural error function is used) +Distances from last query -INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - set of inputs/outputs; one sample = one row; - first NIn columns contain inputs, - next NOut columns - desired outputs. - SSize - number of elements in XY - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsdistances(). -OUTPUT PARAMETERS: - E - error function, sum-of-squares for regression networks, - cross-entropy for classification networks. - Grad - gradient of E with respect to weights of network, array[WCount] +INPUT PARAMETERS + KDT - KD-tree + R - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + R - filled with distances (in corresponding norm) + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpgradnbatch( - multilayerperceptron network, - real_2d_array xy, - ae_int_t ssize, - double& e, - real_1d_array& grad); +
    void alglib::kdtreequeryresultsdistances( + kdtree kdt, + real_1d_array& r, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -Batch Hessian calculation using R-algorithm. -Internal subroutine. +Distances from last query; 'interactive' variant for languages like Python +which support constructs like "R = KDTreeQueryResultsDistancesI(KDT)" +and interactive mode of interpreter. - -- ALGLIB -- - Copyright 26.01.2008 by Bochkanov Sergey. +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. - Hessian calculation based on R-algorithm described in - "Fast Exact Multiplication by the Hessian", - B. A. Pearlmutter, - Neural Computation, 1994. + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlphessianbatch( - multilayerperceptron network, - real_2d_array xy, - ae_int_t ssize, - double& e, - real_1d_array& grad, - real_2d_array& h); +
    void alglib::kdtreequeryresultsdistancesi( + kdtree kdt, + real_1d_array& r, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Batch Hessian calculation (natural error function) using R-algorithm. -Internal subroutine. +Tags from last query - -- ALGLIB -- - Copyright 26.01.2008 by Bochkanov Sergey. +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultstags(). - Hessian calculation based on R-algorithm described in - "Fast Exact Multiplication by the Hessian", - B. A. Pearlmutter, - Neural Computation, 1994. -*************************************************************************/ -
    void alglib::mlphessiannbatch( - multilayerperceptron network, - real_2d_array xy, - ae_int_t ssize, - double& e, - real_1d_array& grad, - real_2d_array& h); +INPUT PARAMETERS + KDT - KD-tree + Tags - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. -
    - -
    -
    /************************************************************************* -Internal subroutine. +OUTPUT PARAMETERS + Tags - filled with tags associated with points, + or, when no tags were supplied, with zeros + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpinitpreprocessor( - multilayerperceptron network, - real_2d_array xy, - ae_int_t ssize); +
    void alglib::kdtreequeryresultstags( + kdtree kdt, + integer_1d_array& tags, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -Tells whether network is SOFTMAX-normalized (i.e. classifier) or not. +Tags from last query; 'interactive' variant for languages like Python +which support constructs like "Tags = KDTreeQueryResultsTagsI(KDT)" and +interactive mode of interpreter. + +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::mlpissoftmax(multilayerperceptron network); +
    void alglib::kdtreequeryresultstagsi( + kdtree kdt, + integer_1d_array& tags, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Procesing +X-values from last query. -INPUT PARAMETERS: - Network - neural network - X - input vector, array[0..NIn-1]. +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsx(). -OUTPUT PARAMETERS: - Y - result. Regression estimate when solving regression task, - vector of posterior probabilities for classification task. +INPUT PARAMETERS + KDT - KD-tree + X - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. -See also MLPProcessI +OUTPUT PARAMETERS + X - rows are filled with X-values + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpprocess( - multilayerperceptron network, - real_1d_array x, - real_1d_array& y); +
    void alglib::kdtreequeryresultsx( + kdtree kdt, + real_2d_array& x, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -'interactive' variant of MLPProcess for languages like Python which -support constructs like "Y = MLPProcess(NN,X)" and interactive mode of the -interpreter +X-values from last query; 'interactive' variant for languages like Python +which support constructs like "X = KDTreeQueryResultsXI(KDT)" and +interactive mode of interpreter. This function allocates new array on each call, so it is significantly slower than its 'non-interactive' counterpart, but it is more convenient when you call it from command line. -- ALGLIB -- - Copyright 21.09.2010 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpprocessi( - multilayerperceptron network, - real_1d_array x, - real_1d_array& y); +
    void alglib::kdtreequeryresultsxi( + kdtree kdt, + real_2d_array& x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Returns information about initialized network: number of inputs, outputs, -weights. +X- and Y-values from last query + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsxy(). + +INPUT PARAMETERS + KDT - KD-tree + XY - possibly pre-allocated buffer. If XY is too small to store + result, it is resized. If size(XY) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + XY - rows are filled with points: first NX columns with + X-values, next NY columns - with Y-values. + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpproperties( - multilayerperceptron network, - ae_int_t& nin, - ae_int_t& nout, - ae_int_t& wcount); +
    void alglib::kdtreequeryresultsxy( + kdtree kdt, + real_2d_array& xy, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -Randomization of neural network weights +XY-values from last query; 'interactive' variant for languages like Python +which support constructs like "XY = KDTreeQueryResultsXYI(KDT)" and +interactive mode of interpreter. + +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. -- ALGLIB -- - Copyright 06.11.2007 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlprandomize(multilayerperceptron network); +
    void alglib::kdtreequeryresultsxyi( + kdtree kdt, + real_2d_array& xy, + const xparams _params = alglib::xdefault);
    - +
     
/************************************************************************* -Randomization of neural network weights and standartisator +R-NN query: all points within R-sphere centered at X, ordered by distance +between point and X (by ascending). + +NOTE: it is also possible to perform unordered queries by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. Such queries + are faster because we do not have to use heap structure for sorting. + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: kdtreetsqueryrnn() ("Ts" stands for "thread-safe"). + +INPUT PARAMETERS + KDT - KD-tree + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +actual results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances -- ALGLIB -- - Copyright 10.03.2008 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlprandomizefull(multilayerperceptron network); +
    ae_int_t alglib::kdtreequeryrnn( + kdtree kdt, + real_1d_array x, + double r, + const xparams _params = alglib::xdefault); +ae_int_t alglib::kdtreequeryrnn( + kdtree kdt, + real_1d_array x, + double r, + bool selfmatch, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
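For reference, a small self-contained sketch of an ordered R-NN query (same toy dataset and conventions as the KNN example shipped later in this section; the radius is chosen so that exactly three points fall inside the sphere):

#include "stdafx.h"
#include <stdio.h>
#include "alglibmisc.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
    kdtree kdt;
    real_1d_array x = "[0,0]";
    real_1d_array d;
    real_2d_array r = "[[]]";
    kdtreebuild(a, 2, 0, 2, kdt);
    // all points within radius 1.2 of (0,0); (1,1) lies at distance sqrt(2) and is excluded
    ae_int_t k = kdtreequeryrnn(kdt, x, 1.2);
    kdtreequeryresultsx(kdt, r);
    kdtreequeryresultsdistances(kdt, d);
    printf("%d\n", int(k));                    // 3 (self-match plus two unit-distance neighbors)
    printf("%s\n", d.tostring(1).c_str());     // distances in ascending order: 0, 1, 1
    return 0;
}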

    +
     
    /************************************************************************* -Relative classification error on the test set. +R-NN query: all points within R-sphere centered at X, no ordering by +distance as undicated by "U" suffix (faster that ordered query, for large +queries - significantly faster). + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: kdtreetsqueryrnn() ("Ts" stands for "thread-safe"). + +INPUT PARAMETERS + KDT - KD-tree + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True +RESULT + number of neighbors found, >=0 -FOR USERS OF COMMERCIAL EDITION: +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +actual results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +As indicated by "U" suffix, this function returns unordered results. + -- ALGLIB -- + Copyright 01.11.2018 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::kdtreequeryrnnu( + kdtree kdt, + real_1d_array x, + double r, + const xparams _params = alglib::xdefault); +ae_int_t alglib::kdtreequeryrnnu( + kdtree kdt, + real_1d_array x, + double r, + bool selfmatch, + const xparams _params = alglib::xdefault); -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. +
    + +
    +
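The unordered variant is called in exactly the same way; only the ordering guarantee differs. A minimal sketch (same toy dataset as above, count checked only, since the result order is unspecified):

#include "stdafx.h"
#include <stdio.h>
#include "alglibmisc.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
    kdtree kdt;
    real_1d_array x = "[0,0]";
    real_2d_array r = "[[]]";
    kdtreebuild(a, 2, 0, 2, kdt);
    // same sphere as the ordered query above, but results come back in
    // unspecified order, so only the count is printed here
    ae_int_t k = kdtreequeryrnnu(kdt, x, 1.2);
    kdtreequeryresultsx(kdt, r);
    printf("%d\n", int(k));                    // 3
    return 0;
}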
    /************************************************************************* +This function serializes data structure to string. -RESULT: -Percent of incorrectly classified cases. Works both for classifier -networks and general purpose networks used as classifiers. +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. + But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. +*************************************************************************/ +
    void kdtreeserialize(kdtree &obj, std::string &s_out); +void kdtreeserialize(kdtree &obj, std::ostream &s_out); +
    + +
    +
    /************************************************************************* +K-NN query: approximate K nearest neighbors, using thread-local buffer. -DATASET FORMAT: +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + K - number of neighbors to return, K>=1 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + Eps - approximation factor, Eps>=0. eps-approximate nearest + neighbor is a neighbor whose distance from X is at + most (1+eps) times distance of true nearest neighbor. -This function uses two different dataset formats - one for regression -networks, another one for classification networks. +RESULT + number of actual neighbors found (either K or N, if K>N). -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +NOTES + significant performance gain may be achieved only when Eps is is on + the order of magnitude of 1 or larger. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -- ALGLIB -- - Copyright 25.12.2008 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlprelclserror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); -double alglib::smp_mlprelclserror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); +
    ae_int_t alglib::kdtreetsqueryaknn( + kdtree kdt, + kdtreerequestbuffer buf, + real_1d_array x, + ae_int_t k, + double eps, + const xparams _params = alglib::xdefault); +ae_int_t alglib::kdtreetsqueryaknn( + kdtree kdt, + kdtreerequestbuffer buf, + real_1d_array x, + ae_int_t k, + bool selfmatch, + double eps, + const xparams _params = alglib::xdefault);
    - +
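A minimal single-threaded sketch of the buffered workflow described above (buffer creation via kdtreecreaterequestbuffer(); in real code each thread would own its buffer, see the threading sketch later in this section):

#include "stdafx.h"
#include <stdio.h>
#include "alglibmisc.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
    kdtree kdt;
    kdtreerequestbuffer buf;
    real_1d_array x = "[2,2]";
    real_2d_array r = "[[]]";
    kdtreebuild(a, 2, 0, 2, kdt);
    kdtreecreaterequestbuffer(kdt, buf);            // buffer tied to this particular tree
    // two approximate nearest neighbors; Eps=0 requests exact answers
    ae_int_t k = kdtreetsqueryaknn(kdt, buf, x, 2, 0.0);
    kdtreetsqueryresultsx(kdt, buf, r);             // note the "ts" retrieval variant
    printf("%d\n", int(k));                         // 2
    printf("%s\n", r.tostring(1).c_str());          // (1,1) first, then (0,1) or (1,0)
    return 0;
}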
     
    /************************************************************************* -Relative classification error on the test set given by sparse matrix. +Box query: all points within user-specified box, using thread-local buffer. +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. -FOR USERS OF COMMERCIAL EDITION: +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + BoxMin - lower bounds, array[0..NX-1]. + BoxMax - upper bounds, array[0..NX-1]. - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +RESULT + number of actual neighbors found (in [0,N]). +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "ts" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() returns zeros for this query + +NOTE: this particular query returns unordered results, because there is no + meaningful way of ordering points. Furthermore, no 'distance' is + associated with points - it is either INSIDE or OUTSIDE (so request + for distances will return zeros). + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format. Sparse matrix must use CRS format - for storage. - NPoints - points count, >=0. + -- ALGLIB -- + Copyright 14.05.2016 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::kdtreetsquerybox( + kdtree kdt, + kdtreerequestbuffer buf, + real_1d_array boxmin, + real_1d_array boxmax, + const xparams _params = alglib::xdefault); -RESULT: -Percent of incorrectly classified cases. Works both for classifier -networks and general purpose networks used as classifiers. +
    + +
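A minimal sketch of a buffered box query (same toy dataset and conventions as the other sketches in this section; distances retrieved after this query would be zeros, as noted above):

#include "stdafx.h"
#include <stdio.h>
#include "alglibmisc.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
    kdtree kdt;
    kdtreerequestbuffer buf;
    real_1d_array boxmin = "[-0.5,-0.5]";
    real_1d_array boxmax = "[0.5,1.5]";
    real_2d_array r = "[[]]";
    kdtreebuild(a, 2, 0, 2, kdt);
    kdtreecreaterequestbuffer(kdt, buf);
    // points with -0.5<=x0<=0.5 and -0.5<=x1<=1.5: (0,0) and (0,1)
    ae_int_t k = kdtreetsquerybox(kdt, buf, boxmin, boxmax);
    kdtreetsqueryresultsx(kdt, buf, r);
    printf("%d\n", int(k));                         // 2
    return 0;
}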
    +
    /************************************************************************* +K-NN query: K nearest neighbors, using external thread-local buffer. -DATASET FORMAT: +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. -This function uses two different dataset formats - one for regression -networks, another one for classification networks. +INPUT PARAMETERS + KDT - kd-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + K - number of neighbors to return, K>=1 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +RESULT + number of actual neighbors found (either K or N, if K>N). -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -- ALGLIB -- - Copyright 09.08.2012 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlprelclserrorsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); -double alglib::smp_mlprelclserrorsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); +
    ae_int_t alglib::kdtreetsqueryknn( + kdtree kdt, + kdtreerequestbuffer buf, + real_1d_array x, + ae_int_t k, + const xparams _params = alglib::xdefault); +ae_int_t alglib::kdtreetsqueryknn( + kdtree kdt, + kdtreerequestbuffer buf, + real_1d_array x, + ae_int_t k, + bool selfmatch, + const xparams _params = alglib::xdefault);
    - +
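To illustrate the threading contract stated above, here is a small sketch (an illustration only; it assumes C++11 std::thread is available). The key point is that the kd-tree is shared while each thread owns its private kdtreerequestbuffer:

#include "stdafx.h"
#include <stdio.h>
#include <thread>
#include "alglibmisc.h"

using namespace alglib;

// Each thread owns a private request buffer; the kd-tree itself is shared read-only.
static void nn_worker(kdtree &kdt, const char *query)
{
    kdtreerequestbuffer buf;
    real_1d_array x(query);
    real_2d_array r = "[[]]";
    kdtreecreaterequestbuffer(kdt, buf);
    kdtreetsqueryknn(kdt, buf, x, 1);
    kdtreetsqueryresultsx(kdt, buf, r);
    printf("%s\n", r.tostring(1).c_str());
}

int main(int argc, char **argv)
{
    real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
    kdtree kdt;
    kdtreebuild(a, 2, 0, 2, kdt);
    std::thread t1(nn_worker, std::ref(kdt), "[-1,0]");   // nearest is (0,0)
    std::thread t2(nn_worker, std::ref(kdt), "[2,2]");    // nearest is (1,1)
    t1.join();
    t2.join();
    return 0;
}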
     
    /************************************************************************* -RMS error on the test set given. - +Distances from last query associated with kdtreerequestbuffer object. -FOR USERS OF COMMERCIAL EDITION: +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - KDTreeTsqueryresultsdistances(). - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - - -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. - -RESULT: -Root mean square error. Its meaning for regression task is obvious. As for -classification task, RMS error means error when estimating posterior -probabilities. - -DATASET FORMAT: +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. + R - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. -This function uses two different dataset formats - one for regression -networks, another one for classification networks. +OUTPUT PARAMETERS + R - filled with distances (in corresponding norm) -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). 
+SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlprmserror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); -double alglib::smp_mlprmserror( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::kdtreetsqueryresultsdistances( + kdtree kdt, + kdtreerequestbuffer buf, + real_1d_array& r, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -RMS error on the test set given by sparse matrix. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - - -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. - NPoints - points count, >=0. +Tags from last query associated with kdtreerequestbuffer object. -RESULT: -Root mean square error. Its meaning for regression task is obvious. As for -classification task, RMS error means error when estimating posterior -probabilities. +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - KDTreeTsqueryresultstags(). -DATASET FORMAT: +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. + Tags - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. -This function uses two different dataset formats - one for regression -networks, another one for classification networks. +OUTPUT PARAMETERS + Tags - filled with tags associated with points, + or, when no tags were supplied, with zeros -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. 
-For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 09.08.2012 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlprmserrorsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); -double alglib::smp_mlprmserrorsparse( - multilayerperceptron network, - sparsematrix xy, - ae_int_t npoints); +
    void alglib::kdtreetsqueryresultstags( + kdtree kdt, + kdtreerequestbuffer buf, + integer_1d_array& tags, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function serializes data structure to string. +X-values from last query associated with kdtreerequestbuffer object. -Important properties of s_out: -* it contains alphanumeric characters, dots, underscores, minus signs -* these symbols are grouped into words, which are separated by spaces - and Windows-style (CR+LF) newlines -* although serializer uses spaces and CR+LF as separators, you can - replace any separator character by arbitrary combination of spaces, - tabs, Windows or Unix newlines. It allows flexible reformatting of - the string in case you want to include it into text or XML file. - But you should not insert separators into the middle of the "words" - nor you should change case of letters. -* s_out can be freely moved between 32-bit and 64-bit systems, little - and big endian machines, and so on. You can serialize structure on - 32-bit machine and unserialize it on 64-bit one (or vice versa), or - serialize it on SPARC and unserialize on x86. You can also - serialize it in C++ version of ALGLIB and unserialize in C# one, - and vice versa. -*************************************************************************/ -
    void mlpserialize(multilayerperceptron &obj, std::string &s_out); -
    - -
    -
    /************************************************************************* -This function sets offset/scaling coefficients for I-th input of the -network. +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. + X - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. -INPUT PARAMETERS: - Network - network - I - input index - Mean - mean term - Sigma - sigma term (if zero, will be replaced by 1.0) +OUTPUT PARAMETERS + X - rows are filled with X-values -NTE: I-th input is passed through linear transformation - IN[i] = (IN[i]-Mean)/Sigma -before feeding to the network. This function sets Mean and Sigma. +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpsetinputscaling( - multilayerperceptron network, - ae_int_t i, - double mean, - double sigma); +
    void alglib::kdtreetsqueryresultsx( + kdtree kdt, + kdtreerequestbuffer buf, + real_2d_array& x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function modifies information about Ith neuron of Kth layer +X- and Y-values from last query associated with kdtreerequestbuffer object. -INPUT PARAMETERS: - Network - network - K - layer index - I - neuron index (within layer) - FKind - activation function type (used by MLPActivationFunction()) - this value must be zero for input neurons - (you can not set activation function for input neurons) - Threshold - also called offset, bias - this value must be zero for input neurons - (you can not set threshold for input neurons) +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. + XY - possibly pre-allocated buffer. If XY is too small to store + result, it is resized. If size(XY) is enough to store + result, it is left unchanged. -NOTES: -1. this function throws exception if layer or neuron with given index do - not exists. -2. this function also throws exception when you try to set non-linear - activation function for input neurons (any kind of network) or for output - neurons of classifier network. -3. this function throws exception when you try to set non-zero threshold for - input neurons (any kind of network). +OUTPUT PARAMETERS + XY - rows are filled with points: first NX columns with + X-values, next NY columns - with Y-values. + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpsetneuroninfo( - multilayerperceptron network, - ae_int_t k, - ae_int_t i, - ae_int_t fkind, - double threshold); +
    void alglib::kdtreetsqueryresultsxy( + kdtree kdt, + kdtreerequestbuffer buf, + real_2d_array& xy, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets offset/scaling coefficients for I-th output of the -network. +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, sorted by distance between point and X (by ascending) -INPUT PARAMETERS: - Network - network - I - input index - Mean - mean term - Sigma - sigma term (if zero, will be replaced by 1.0) +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. -OUTPUT PARAMETERS: +NOTE: it is also possible to perform undordered queries performed by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. Such queries + are faster because we do not have to use heap structure for sorting. -NOTE: I-th output is passed through linear transformation - OUT[i] = OUT[i]*Sigma+Mean -before returning it to user. This function sets Sigma/Mean. In case we -have SOFTMAX-normalized network, you can not set (Sigma,Mean) to anything -other than(0.0,1.0) - this function will throw exception. +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpsetoutputscaling( - multilayerperceptron network, - ae_int_t i, - double mean, - double sigma); +
    ae_int_t alglib::kdtreetsqueryrnn( + kdtree kdt, + kdtreerequestbuffer buf, + real_1d_array x, + double r, + const xparams _params = alglib::xdefault); +ae_int_t alglib::kdtreetsqueryrnn( + kdtree kdt, + kdtreerequestbuffer buf, + real_1d_array x, + double r, + bool selfmatch, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function modifies information about connection from I0-th neuron of -K0-th layer to I1-th neuron of K1-th layer. +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, no ordering by distance as undicated by "U" suffix +(faster that ordered query, for large queries - significantly faster). -INPUT PARAMETERS: - Network - network - K0 - layer index - I0 - neuron index (within layer) - K1 - layer index - I1 - neuron index (within layer) - W - connection weight (must be zero for non-existent - connections) +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. -This function: -1. throws exception if layer or neuron with given index do not exists. -2. throws exception if you try to set non-zero weight for non-existent - connection +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +As indicated by "U" suffix, this function returns unordered results. + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpsetweight( - multilayerperceptron network, - ae_int_t k0, - ae_int_t i0, - ae_int_t k1, - ae_int_t i1, - double w); +
    ae_int_t alglib::kdtreetsqueryrnnu( + kdtree kdt, + kdtreerequestbuffer buf, + real_1d_array x, + double r, + const xparams _params = alglib::xdefault); +ae_int_t alglib::kdtreetsqueryrnnu( + kdtree kdt, + kdtreerequestbuffer buf, + real_1d_array x, + double r, + bool selfmatch, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* This function unserializes data structure from string. *************************************************************************/ -
    void mlpunserialize(std::string &s_in, multilayerperceptron &obj); +
    void kdtreeunserialize(const std::string &s_in, kdtree &obj); +void kdtreeunserialize(const std::istream &s_in, kdtree &obj);
    - + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "alglibmisc.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
    +    ae_int_t nx = 2;
    +    ae_int_t ny = 0;
    +    ae_int_t normtype = 2;
    +    kdtree kdt;
    +    real_1d_array x;
    +    real_2d_array r = "[[]]";
    +    ae_int_t k;
    +    kdtreebuild(a, nx, ny, normtype, kdt);
    +    x = "[-1,0]";
    +    k = kdtreequeryknn(kdt, x, 1);
    +    printf("%d\n", int(k)); // EXPECTED: 1
    +    kdtreequeryresultsx(kdt, r);
    +    printf("%s\n", r.tostring(1).c_str()); // EXPECTED: [[0,0]]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "alglibmisc.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
    +    ae_int_t nx = 2;
    +    ae_int_t ny = 0;
    +    ae_int_t normtype = 2;
    +    kdtree kdt0;
    +    kdtree kdt1;
    +    std::string s;
    +    real_1d_array x;
    +    real_2d_array r0 = "[[]]";
    +    real_2d_array r1 = "[[]]";
    +
    +    //
    +    // Build tree and serialize it
    +    //
    +    kdtreebuild(a, nx, ny, normtype, kdt0);
    +    alglib::kdtreeserialize(kdt0, s);
    +    alglib::kdtreeunserialize(s, kdt1);
    +
    +    //
    +    // Compare results from KNN queries
    +    //
    +    x = "[-1,0]";
    +    kdtreequeryknn(kdt0, x, 1);
    +    kdtreequeryresultsx(kdt0, r0);
    +    kdtreequeryknn(kdt1, x, 1);
    +    kdtreequeryresultsx(kdt1, r1);
    +    printf("%s\n", r0.tostring(1).c_str()); // EXPECTED: [[0,0]]
    +    printf("%s\n", r1.tostring(1).c_str()); // EXPECTED: [[0,0]]
    +    return 0;
    +}
    +
    +
    +
    - +
     
    /************************************************************************* -Neural networks ensemble + *************************************************************************/ -
    class mlpensemble +
    class nleqreport { + ae_int_t iterationscount; + ae_int_t nfunc; + ae_int_t njac; + ae_int_t terminationtype; };
    - +
     
    /************************************************************************* -Average cross-entropy (in bits per element) on the test set - -INPUT PARAMETERS: - Ensemble- ensemble - XY - test set - NPoints - test set size - -RESULT: - CrossEntropy/(NPoints*LN(2)). - Zero if ensemble solves regression task. - -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlpeavgce( - mlpensemble ensemble, - real_2d_array xy, - ae_int_t npoints); +
    class nleqstate +{ +};
    - +
     
/************************************************************************* -Average error on the test set + LEVENBERG-MARQUARDT-LIKE NONLINEAR SOLVER -INPUT PARAMETERS: - Ensemble- ensemble - XY - test set - NPoints - test set size +DESCRIPTION: +This algorithm solves a system of nonlinear equations + F[0](x[0], ..., x[n-1]) = 0 + F[1](x[0], ..., x[n-1]) = 0 + ... + F[M-1](x[0], ..., x[n-1]) = 0 +where M and N do not necessarily coincide. The algorithm converges quadratically +under the following conditions: + * the solution set XS is nonempty + * for some xs in XS there exists a neighbourhood N(xs) such that: + * vector function F(x) and its Jacobian J(x) are continuously + differentiable on N + * ||F(x)|| provides local error bound on N, i.e. there exists a constant + c1 such that ||F(x)||>c1*distance(x,XS) +Note that these conditions are much weaker than the usual non-singularity +conditions. For example, algorithm will converge for any affine function +F (whether its Jacobian is singular or not). -RESULT: - Its meaning for regression task is obvious. As for classification task -it means average error when estimating posterior probabilities. - -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::mlpeavgerror( - mlpensemble ensemble, - real_2d_array xy, - ae_int_t npoints); +REQUIREMENTS: +Algorithm will request following information during its operation: +* function vector F[] and Jacobian matrix at given point X +* value of merit function f(x)=F[0]^2(x)+...+F[M-1]^2(x) at given point X + + +USAGE: +1. User initializes algorithm state with NLEQCreateLM() call +2. User tunes solver parameters with NLEQSetCond(), NLEQSetStpMax() and + other functions +3. User calls NLEQSolve() function which takes algorithm state and + pointers (delegates, etc.) to callback functions which calculate merit + function value and Jacobian. +4. User calls NLEQResults() to get solution +5. Optionally, user may call NLEQRestartFrom() to solve another problem + with same parameters (N/M) but another starting point and/or another + function vector. NLEQRestartFrom() allows to reuse already initialized + structure. -
    - -
    -
    /************************************************************************* -Average relative error on the test set INPUT PARAMETERS: - Ensemble- ensemble - XY - test set - NPoints - test set size + N - space dimension, N>1: + * if provided, only leading N elements of X are used + * if not provided, determined automatically from size of X + M - system size + X - starting point + + +OUTPUT PARAMETERS: + State - structure which stores algorithm state + + +NOTES: +1. you may tune stopping conditions with NLEQSetCond() function +2. if target function contains exp() or other fast growing functions, and + optimization algorithm makes too large steps which leads to overflow, + use NLEQSetStpMax() function to bound algorithm's steps. +3. this algorithm is a slightly modified implementation of the method + described in 'Levenberg-Marquardt method for constrained nonlinear + equations with strong local convergence properties' by Christian Kanzow + Nobuo Yamashita and Masao Fukushima and further developed in 'On the + convergence of a New Levenberg-Marquardt Method' by Jin-yan Fan and + Ya-Xiang Yuan. -RESULT: - Its meaning for regression task is obvious. As for classification task -it means average relative error when estimating posterior probabilities. -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey + Copyright 20.08.2009 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlpeavgrelerror( - mlpensemble ensemble, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::nleqcreatelm( + ae_int_t m, + real_1d_array x, + nleqstate& state, + const xparams _params = alglib::xdefault); +void alglib::nleqcreatelm( + ae_int_t n, + ae_int_t m, + real_1d_array x, + nleqstate& state, + const xparams _params = alglib::xdefault);
    - +
     
/************************************************************************* -Like MLPCreate0, but for ensembles. +This subroutine restarts the nonlinear solver from a new point. All solver +parameters are left unchanged. + +This function allows one to solve multiple systems of equations (which +must have the same number of dimensions) without object reallocation penalty. + +INPUT PARAMETERS: + State - structure used for reverse communication previously + allocated with NLEQCreateLM() call. + X - new starting point. -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey + Copyright 30.07.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpecreate0( - ae_int_t nin, - ae_int_t nout, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    void alglib::nleqrestartfrom( + nleqstate state, + real_1d_array x, + const xparams _params = alglib::xdefault);
    - +
     
/************************************************************************* -Like MLPCreate1, but for ensembles. +NLEQ solver results + +INPUT PARAMETERS: + State - algorithm state. + +OUTPUT PARAMETERS: + X - array[0..N-1], solution + Rep - optimization report: + * Rep.TerminationType completion code: + * -4 ERROR: algorithm has converged to the + stationary point Xf which is local minimum of + f=F[0]^2+...+F[m-1]^2, but is not solution of + nonlinear system. + * 1 sqrt(f)<=EpsF. + * 5 MaxIts steps were taken + * 7 stopping conditions are too stringent, + further improvement is impossible + * Rep.IterationsCount contains iterations count + * NFEV contains number of function calculations + * ActiveConstraints contains number of active constraints -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey + Copyright 20.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpecreate1( - ae_int_t nin, - ae_int_t nhid, - ae_int_t nout, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    void alglib::nleqresults( + nleqstate state, + real_1d_array& x, + nleqreport& rep, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Like MLPCreate2, but for ensembles. +NLEQ solver results + +Buffered implementation of NLEQResults(), which uses pre-allocated buffer +to store X[]. If buffer size is too small, it resizes buffer. It is +intended to be used in the inner cycles of performance critical algorithms +where array reallocation penalty is too large to be ignored. -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey + Copyright 20.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpecreate2( - ae_int_t nin, - ae_int_t nhid1, - ae_int_t nhid2, - ae_int_t nout, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    void alglib::nleqresultsbuf( + nleqstate state, + real_1d_array& x, + nleqreport& rep, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Like MLPCreateB0, but for ensembles. +This function sets stopping conditions for the nonlinear solver + +INPUT PARAMETERS: + State - structure which stores algorithm state + EpsF - >=0 + The subroutine finishes its work if on k+1-th iteration + the condition ||F||<=EpsF is satisfied + MaxIts - maximum number of iterations. If MaxIts=0, the number of + iterations is unlimited. + +Passing EpsF=0 and MaxIts=0 simultaneously will lead to automatic +stopping criterion selection (small EpsF). + +NOTES: -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey + Copyright 20.08.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpecreateb0( - ae_int_t nin, - ae_int_t nout, - double b, - double d, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    void alglib::nleqsetcond( + nleqstate state, + double epsf, + ae_int_t maxits, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Like MLPCreateB1, but for ensembles. +This function sets maximum step length + +INPUT PARAMETERS: + State - structure which stores algorithm state + StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't + want to limit step length. + +Use this subroutine when target function contains exp() or other fast +growing functions, and algorithm makes too large steps which lead to +overflow. This function allows us to reject steps that are too large (and +therefore expose us to the possible overflow) without actually calculating +function value at the x+stp*d. -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey + Copyright 20.08.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpecreateb1( - ae_int_t nin, - ae_int_t nhid, - ae_int_t nout, - double b, - double d, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    void alglib::nleqsetstpmax( + nleqstate state, + double stpmax, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Like MLPCreateB2, but for ensembles. +This function turns on/off reporting. + +INPUT PARAMETERS: + State - structure which stores algorithm state + NeedXRep- whether iteration reports are needed or not + +If NeedXRep is True, algorithm will call rep() callback function if it is +provided to NLEQSolve(). -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey + Copyright 20.08.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpecreateb2( - ae_int_t nin, - ae_int_t nhid1, - ae_int_t nhid2, - ae_int_t nout, - double b, - double d, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    void alglib::nleqsetxrep( + nleqstate state, + bool needxrep, + const xparams _params = alglib::xdefault);
    - +
     
/************************************************************************* -Like MLPCreateC0, but for ensembles. +This family of functions is used to launch iterations of the nonlinear solver + +These functions accept the following parameters: + state - algorithm state + func - callback which calculates function (or merit function) + value func at given point x + jac - callback which calculates function vector fi[] + and Jacobian jac at given point x + rep - optional callback which is called after each iteration + can be NULL + ptr - optional pointer which is passed to func/grad/hess/jac/rep + can be NULL + -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpecreatec0( - ae_int_t nin, - ae_int_t nout, - ae_int_t ensemblesize, - mlpensemble& ensemble); - +
    void nleqsolve(nleqstate &state, + void (*func)(const real_1d_array &x, double &func, void *ptr), + void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), + void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, + void *ptr = NULL, + const xparams _xparams = alglib::xdefault);
    - + + +
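Putting the pieces above together, here is a minimal sketch that solves the 2x2 system x0^2+x1^2=4, x0=x1, whose positive solution is x0=x1=sqrt(2). It assumes the NLEQ family lives in solvers.h, as in recent ALGLIB releases:

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "solvers.h"

using namespace alglib;

// merit function f(x) = F0(x)^2 + F1(x)^2 for the system
//   F0 = x0^2 + x1^2 - 4
//   F1 = x0 - x1
static void nleq_func(const real_1d_array &x, double &func, void *ptr)
{
    double f0 = x[0]*x[0] + x[1]*x[1] - 4;
    double f1 = x[0] - x[1];
    func = f0*f0 + f1*f1;
}

// function vector and Jacobian at x
static void nleq_jac(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr)
{
    fi[0] = x[0]*x[0] + x[1]*x[1] - 4;
    fi[1] = x[0] - x[1];
    jac[0][0] = 2*x[0]; jac[0][1] = 2*x[1];
    jac[1][0] = 1;      jac[1][1] = -1;
}

int main(int argc, char **argv)
{
    real_1d_array x = "[1,1]";            // starting point
    nleqstate state;
    nleqreport rep;
    nleqcreatelm(2, x, state);            // M=2 equations, N taken from size of X
    nleqsetcond(state, 1.0e-10, 0);       // stop when ||F||<=1e-10
    nleqsolve(state, nleq_func, nleq_jac);
    nleqresults(state, x, rep);
    printf("%d\n", int(rep.terminationtype));  // should report 1 (||F||<=EpsF reached)
    printf("%s\n", x.tostring(4).c_str());     // both components close to sqrt(2)~1.4142
    return 0;
}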
     
    /************************************************************************* -Like MLPCreateC1, but for ensembles. +Bivariate normal CDF + +Returns the area under the bivariate Gaussian PDF with correlation +parameter equal to Rho, integrated from minus infinity to (x,y): + + + x y + - - + 1 | | | | + bvn(x,y,rho) = ------------------- | | f(u,v,rho)*du*dv + 2pi*sqrt(1-rho^2) | | | | + - - + -INF -INF + + +where + + ( u^2 - 2*rho*u*v + v^2 ) + f(u,v,rho) = exp( - ----------------------- ) + ( 2*(1-rho^2) ) + + +with -1<rho<+1 and arbitrary x, y. + +This subroutine uses high-precision approximation scheme proposed by +Alan Genz in "Numerical Computation of Rectangular Bivariate and +Trivariate Normal and t probabilities", which computes CDF with +absolute error roughly equal to 1e-14. + +This function won't fail as long as Rho is in (-1,+1) range. -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey + Copyright 15.11.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpecreatec1( - ae_int_t nin, - ae_int_t nhid, - ae_int_t nout, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    double alglib::bivariatenormalcdf( + double x, + double y, + double rho, + const xparams _params = alglib::xdefault);
    - +
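A quick numeric sanity check (assuming the specialfunctions.h header): for x=y=0 the bivariate normal CDF has the classical closed form 1/4 + arcsin(rho)/(2*pi), so at rho=0.5 both expressions below should give approximately 0.3333:

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    double rho = 0.5;
    // bvn(0,0,rho) should match the closed form 1/4 + asin(rho)/(2*pi)
    double v        = bivariatenormalcdf(0.0, 0.0, rho);
    double expected = 0.25 + asin(rho)/(2.0*3.14159265358979323846);
    printf("%.6f %.6f\n", v, expected);   // both approximately 0.333333
    return 0;
}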
     
    /************************************************************************* -Like MLPCreateC2, but for ensembles. +Bivariate normal PDF + +Returns probability density function of the bivariate Gaussian with +correlation parameter equal to Rho: + + 1 ( x^2 - 2*rho*x*y + y^2 ) + f(x,y,rho) = ----------------- * exp( - ----------------------- ) + 2pi*sqrt(1-rho^2) ( 2*(1-rho^2) ) + + +with -1<rho<+1 and arbitrary x, y. + +This function won't fail as long as Rho is in (-1,+1) range. -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey + Copyright 15.11.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpecreatec2( - ae_int_t nin, - ae_int_t nhid1, - ae_int_t nhid2, - ae_int_t nout, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    double alglib::bivariatenormalpdf( + double x, + double y, + double rho, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Creates ensemble from network. Only network geometry is copied. +Error function - -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey +The integral is + + x + - + 2 | | 2 + erf(x) = -------- | exp( - t ) dt. + sqrt(pi) | | + - + 0 + +For 0 <= |x| < 1, erf(x) = x * P4(x**2)/Q5(x**2); otherwise +erf(x) = 1 - erfc(x). + + +ACCURACY: + + Relative error: +arithmetic domain # trials peak rms + IEEE 0,1 30000 3.7e-16 1.0e-16 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::mlpecreatefromnetwork( - multilayerperceptron network, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    double alglib::errorfunction( + double x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Like MLPCreateR0, but for ensembles. +Complementary error function - -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey + 1 - erf(x) = + + inf. + - + 2 | | 2 + erfc(x) = -------- | exp( - t ) dt + sqrt(pi) | | + - + x + + +For small x, erfc(x) = 1 - erf(x); otherwise rational +approximations are computed. + + +ACCURACY: + + Relative error: +arithmetic domain # trials peak rms + IEEE 0,26.6417 30000 5.7e-14 1.5e-14 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::mlpecreater0( - ae_int_t nin, - ae_int_t nout, - double a, - double b, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    double alglib::errorfunctionc( + double x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Like MLPCreateR1, but for ensembles. +Inverse of the error function - -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::mlpecreater1( - ae_int_t nin, - ae_int_t nhid, - ae_int_t nout, - double a, - double b, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    double alglib::inverf(double e, const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Like MLPCreateR2, but for ensembles. +Inverse of Normal CDF - -- ALGLIB -- - Copyright 18.02.2009 by Bochkanov Sergey +Returns the argument, x, for which the area under the +Gaussian probability density function (integrated from +minus infinity to x) is equal to y. + + +For small arguments 0 < y < exp(-2), the program computes +z = sqrt( -2.0 * log(y) ); then the approximation is +x = z - log(z)/z - (1/z) P(1/z) / Q(1/z). +There are two rational functions P/Q, one for 0 < y < exp(-32) +and the other for y up to exp(-2). For larger arguments, +w = y - 0.5, and x/sqrt(2pi) = w + w**3 R(w**2)/S(w**2)). + +ACCURACY: + + Relative error: +arithmetic domain # trials peak rms + IEEE 0.125, 1 20000 7.2e-16 1.3e-16 + IEEE 3e-308, 0.135 50000 4.6e-16 9.8e-17 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::mlpecreater2( - ae_int_t nin, - ae_int_t nhid1, - ae_int_t nhid2, - ae_int_t nout, - double a, - double b, - ae_int_t ensemblesize, - mlpensemble& ensemble); +
    double alglib::invnormalcdf( + double y0, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Return normalization type (whether ensemble is SOFTMAX-normalized or not). - - -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey +Same as invnormalcdf(), deprecated name *************************************************************************/ -
    bool alglib::mlpeissoftmax(mlpensemble ensemble); +
    double alglib::invnormaldistribution( + double y0, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Procesing +Normal distribution CDF -INPUT PARAMETERS: - Ensemble- neural networks ensemble - X - input vector, array[0..NIn-1]. - Y - (possibly) preallocated buffer; if size of Y is less than - NOut, it will be reallocated. If it is large enough, it - is NOT reallocated, so we can save some time on reallocation. +Returns the area under the Gaussian probability density +function, integrated from minus infinity to x: + + x + - + 1 | | 2 + ndtr(x) = --------- | exp( - t /2 ) dt + sqrt(2pi) | | + - + -inf. + = ( 1 + erf(z) ) / 2 + = erfc(z) / 2 -OUTPUT PARAMETERS: - Y - result. Regression estimate when solving regression task, - vector of posterior probabilities for classification task. +where z = x/sqrt(2). Computation is via the functions +erf and erfc. - -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey + +ACCURACY: + + Relative error: +arithmetic domain # trials peak rms + IEEE -13,0 30000 3.4e-14 6.7e-15 + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::mlpeprocess( - mlpensemble ensemble, - real_1d_array x, - real_1d_array& y); +
    double alglib::normalcdf( + double x, + const xparams _params = alglib::xdefault);
    - +
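The identities quoted above are easy to verify numerically. The following sketch (specialfunctions.h assumed; the complementary-error-function form is written with an explicit minus sign on z) evaluates the CDF three ways and round-trips it through the inverse:

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    double x = 0.7;
    // ndtr(x) = (1+erf(z))/2 = erfc(-z)/2 with z = x/sqrt(2)
    double p1 = normalcdf(x);
    double p2 = 0.5*(1.0 + errorfunction(x/sqrt(2.0)));
    double p3 = 0.5*errorfunctionc(-x/sqrt(2.0));
    double xr = invnormalcdf(p1);               // round trip should recover x
    printf("%.12f %.12f %.12f\n", p1, p2, p3);  // three equal values, ~0.758036
    printf("%.12f\n", xr);                      // ~0.7
    return 0;
}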
     
    /************************************************************************* -'interactive' variant of MLPEProcess for languages like Python which -support constructs like "Y = MLPEProcess(LM,X)" and interactive mode of the -interpreter - -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. - - -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey +Same as normalcdf(), obsolete name. *************************************************************************/ -
    void alglib::mlpeprocessi( - mlpensemble ensemble, - real_1d_array x, - real_1d_array& y); +
    double alglib::normaldistribution( + double x, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Return ensemble properties (number of inputs and outputs). +Normal distribution PDF - -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey +Returns Gaussian probability density function: + + 1 + f(x) = --------- * exp(-x^2/2) + sqrt(2pi) + +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier *************************************************************************/ -
    void alglib::mlpeproperties( - mlpensemble ensemble, - ae_int_t& nin, - ae_int_t& nout); +
    double alglib::normalpdf( + double x, + const xparams _params = alglib::xdefault);
    - + + +
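A small sketch exercising normalpdf() and normalcdf() together with invnormalcdf() (header name assumed as above); it prints the density and CDF at zero and checks the CDF/inverse-CDF round trip.

#include <cstdio>
#include "specialfunctions.h"

int main()
{
    printf("%.4f\n", alglib::normalpdf(0.0));                        // 1/sqrt(2pi), about 0.3989
    printf("%.4f\n", alglib::normalcdf(0.0));                        // 0.5 by symmetry
    printf("%.4f\n", alglib::invnormalcdf(alglib::normalcdf(1.5)));  // round trip, about 1.5
    return 0;
}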
     
    /************************************************************************* -Randomization of MLP ensemble +This object stores state of the iterative norm estimation algorithm. - -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey +You should use ALGLIB functions to work with this object. *************************************************************************/ -
    void alglib::mlperandomize(mlpensemble ensemble); +
    class normestimatorstate +{ +};
    - +
     
    /************************************************************************* -Relative classification error on the test set +This procedure initializes matrix norm estimator. + +USAGE: +1. User initializes algorithm state with NormEstimatorCreate() call +2. User calls NormEstimatorEstimateSparse() (or NormEstimatorIteration()) +3. User calls NormEstimatorResults() to get solution. INPUT PARAMETERS: - Ensemble- ensemble - XY - test set - NPoints - test set size + M - number of rows in the matrix being estimated, M>0 + N - number of columns in the matrix being estimated, N>0 + NStart - number of random starting vectors + recommended value - at least 5. + NIts - number of iterations to do with best starting vector + recommended value - at least 5. -RESULT: - percent of incorrectly classified cases. - Works both for classifier betwork and for regression networks which -are used as classifiers. +OUTPUT PARAMETERS: + State - structure which stores algorithm state + + +NOTE: this algorithm is effectively deterministic, i.e. it always returns +same result when repeatedly called for the same matrix. In fact, algorithm +uses randomized starting vectors, but internal random numbers generator +always generates same sequence of the random values (it is a feature, not +bug). + +Algorithm can be made non-deterministic with NormEstimatorSetSeed(0) call. -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey + Copyright 06.12.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlperelclserror( - mlpensemble ensemble, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::normestimatorcreate( + ae_int_t m, + ae_int_t n, + ae_int_t nstart, + ae_int_t nits, + normestimatorstate& state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -RMS error on the test set +This function estimates norm of the sparse M*N matrix A. INPUT PARAMETERS: - Ensemble- ensemble - XY - test set - NPoints - test set size + State - norm estimator state, must be initialized with a call + to NormEstimatorCreate() + A - sparse M*N matrix, must be converted to CRS format + prior to calling this function. -RESULT: - root mean square error. - Its meaning for regression task is obvious. As for classification task -RMS error means error when estimating posterior probabilities. +After this function is over you can call NormEstimatorResults() to get +estimate of the norm(A). -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey + Copyright 06.12.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::mlpermserror( - mlpensemble ensemble, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::normestimatorestimatesparse( + normestimatorstate state, + sparsematrix a, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function serializes data structure to string. +Matrix norm estimation results -Important properties of s_out: -* it contains alphanumeric characters, dots, underscores, minus signs -* these symbols are grouped into words, which are separated by spaces - and Windows-style (CR+LF) newlines -* although serializer uses spaces and CR+LF as separators, you can - replace any separator character by arbitrary combination of spaces, - tabs, Windows or Unix newlines. It allows flexible reformatting of - the string in case you want to include it into text or XML file. - But you should not insert separators into the middle of the "words" - nor you should change case of letters. -* s_out can be freely moved between 32-bit and 64-bit systems, little - and big endian machines, and so on. You can serialize structure on - 32-bit machine and unserialize it on 64-bit one (or vice versa), or - serialize it on SPARC and unserialize on x86. You can also - serialize it in C++ version of ALGLIB and unserialize in C# one, - and vice versa. +INPUT PARAMETERS: + State - algorithm state + +OUTPUT PARAMETERS: + Nrm - estimate of the matrix norm, Nrm>=0 + + -- ALGLIB -- + Copyright 06.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void mlpeserialize(mlpensemble &obj, std::string &s_out); +
    void alglib::normestimatorresults( + normestimatorstate state, + double& nrm, + const xparams _params = alglib::xdefault); +
    - +
     
    /************************************************************************* -This function unserializes data structure from string. +This function changes seed value used by algorithm. In some cases we need +deterministic processing, i.e. subsequent calls must return equal results, +in other cases we need non-deterministic algorithm which returns different +results for the same matrix on every pass. + +Setting zero seed will lead to non-deterministic algorithm, while non-zero +value will make our algorithm deterministic. + +INPUT PARAMETERS: + State - norm estimator state, must be initialized with a call + to NormEstimatorCreate() + SeedVal - seed value, >=0. Zero value = non-deterministic algo. + + -- ALGLIB -- + Copyright 06.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void mlpeunserialize(std::string &s_in, mlpensemble &obj); +
    void alglib::normestimatorsetseed( + normestimatorstate state, + ae_int_t seedval, + const xparams _params = alglib::xdefault); +
    - +
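A sketch of the Create/EstimateSparse/Results sequence described above. The sparse-matrix helpers (sparsecreate, sparseset, sparseconverttocrs) belong to the ALGLIB sparse interface and are not documented in this excerpt, so their use here is an assumption, as is the linalg.h header name.

#include <cstdio>
#include "linalg.h"

using namespace alglib;

int main()
{
    // build a 3x3 diagonal test matrix in hash-table format and convert it to CRS,
    // which NormEstimatorEstimateSparse() expects
    sparsematrix a;
    sparsecreate(3, 3, a);
    sparseset(a, 0, 0, 2.0);
    sparseset(a, 1, 1, -3.0);
    sparseset(a, 2, 2, 1.0);
    sparseconverttocrs(a);

    normestimatorstate s;
    normestimatorcreate(3, 3, 5, 5, s);   // 5 starting vectors, 5 iterations
    normestimatorsetseed(s, 42);          // nonzero seed keeps the run deterministic
    normestimatorestimatesparse(s, a);

    double nrm;
    normestimatorresults(s, nrm);
    printf("%.2f\n", nrm);                // should be close to 3, the largest singular value
    return 0;
}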
    -mlpcvreport
    -mlpreport
    -mlptrainer
    +odesolverreport
    +odesolverstate
    -mlpcontinuetraining
    -mlpcreatetrainer
    -mlpcreatetrainercls
    -mlpebagginglbfgs
    -mlpebagginglm
    -mlpetraines
    -mlpkfoldcv
    -mlpkfoldcvlbfgs
    -mlpkfoldcvlm
    -mlpsetalgobatch
    -mlpsetcond
    -mlpsetdataset
    -mlpsetdecay
    -mlpsetsparsedataset
    -mlpstarttraining
    -mlptrainensemblees
    -mlptraines
    -mlptrainlbfgs
    -mlptrainlm
    -mlptrainnetwork
    +odesolverresults
    +odesolverrkck
    +odesolversolve
    - - - - - - - - +
    nn_cls2 Binary classification problem
    nn_cls3 Multiclass classification problem
    nn_crossvalidation Cross-validation
    nn_ensembles_es Early stopping ensembles
    nn_parallel Parallel training
    nn_regr Regression problem with one output (2=>1)
    nn_regr_n Regression problem with multiple outputs (2=>2)
    nn_trainerobject Advanced example on trainer object
    odesolver_d1 Solving y'=-y with ODE solver
    - +
     
    /************************************************************************* -Cross-validation estimates of generalization error + *************************************************************************/ -
    class mlpcvreport +
    class odesolverreport { - double relclserror; - double avgce; - double rmserror; - double avgerror; - double avgrelerror; + ae_int_t nfev; + ae_int_t terminationtype; };
    - +
     
    /************************************************************************* -Training report: - * RelCLSError - fraction of misclassified cases. - * AvgCE - acerage cross-entropy - * RMSError - root-mean-square error - * AvgError - average error - * AvgRelError - average relative error - * NGrad - number of gradient calculations - * NHess - number of Hessian calculations - * NCholesky - number of Cholesky decompositions - -NOTE 1: RelCLSError/AvgCE are zero on regression problems. -NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain - errors in prediction of posterior probabilities *************************************************************************/ -
    class mlpreport +
    class odesolverstate { - double relclserror; - double avgce; - double rmserror; - double avgerror; - double avgrelerror; - ae_int_t ngrad; - ae_int_t nhess; - ae_int_t ncholesky; };
    - +
     
/*************************************************************************
-Trainer object for neural network.
+ODE solver results

-You should not try to access fields of this object directly - use ALGLIB
-functions to work with this object.
+Called after OdeSolverIteration returned False.
+
+INPUT PARAMETERS:
+    State   -   algorithm state (used by OdeSolverIteration).
+
+OUTPUT PARAMETERS:
+    M       -   number of tabulated values, M>=1
+    XTbl    -   array[0..M-1], values of X
+    YTbl    -   array[0..M-1,0..N-1], values of Y in X[i]
+    Rep     -   solver report:
+                * Rep.TerminationType completion code:
+                    * -2    X is not ordered by ascending/descending or
+                            there are non-distinct X[], i.e. X[i]=X[i+1]
+                    * -1    incorrect parameters were specified
+                    *  1    task has been solved
+                * Rep.NFEV contains number of function calculations
+
+ -- ALGLIB --
+   Copyright 01.09.2009 by Bochkanov Sergey
*************************************************************************/
-
    class mlptrainer -{ -}; +
    void alglib::odesolverresults( + odesolverstate state, + ae_int_t& m, + real_1d_array& xtbl, + real_2d_array& ytbl, + odesolverreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -IMPORTANT: this is an "expert" version of the MLPTrain() function. We do - not recommend you to use it unless you are pretty sure that you - need ability to monitor training progress. - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support (C++ computational core) - ! - ! Second improvement gives constant speedup (2-3X). First improvement - ! gives close-to-linear speedup on multicore systems. Following - ! operations can be executed in parallel: - ! * gradient calculation over large dataset (if dataset is large enough) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - -This function performs step-by-step training of the neural network. Here -"step-by-step" means that training starts with MLPStartTraining() call, -and then user subsequently calls MLPContinueTraining() to perform one more -iteration of the training. - -This function performs one more iteration of the training and returns -either True (training continues) or False (training stopped). In case True -was returned, Network weights are updated according to the current state -of the optimization progress. In case False was returned, no additional -updates is performed (previous update of the network weights moved us to -the final point, and no additional updates is needed). +Cash-Karp adaptive ODE solver. -EXAMPLE: - > - > [initialize network and trainer object] - > - > MLPStartTraining(Trainer, Network, True) - > while MLPContinueTraining(Trainer, Network) do - > [visualize training progress] - > +This subroutine solves ODE Y'=f(Y,x) with initial conditions Y(xs)=Ys +(here Y may be single variable or vector of N variables). INPUT PARAMETERS: - S - trainer object - Network - neural network structure, which is used to store - current state of the training process. - -OUTPUT PARAMETERS: - Network - weights of the neural network are rewritten by the - current approximation. - -NOTE: this method uses sum-of-squares error function for training. - -NOTE: it is expected that trainer object settings are NOT changed during - step-by-step training, i.e. no one changes stopping criteria or - training set during training. It is possible and there is no defense - against such actions, but algorithm behavior in such cases is - undefined and can be unpredictable. + Y - initial conditions, array[0..N-1]. + contains values of Y[] at X[0] + N - system size + X - points at which Y should be tabulated, array[0..M-1] + integrations starts at X[0], ends at X[M-1], intermediate + values at X[i] are returned too. + SHOULD BE ORDERED BY ASCENDING OR BY DESCENDING! 
+    M       -   number of intermediate points + first point + last point:
+                * M>2 means that you need both Y(X[M-1]) and M-2 values at
+                  intermediate points
+                * M=2 means that you want just to integrate from X[0] to
+                  X[1] and aren't interested in intermediate values.
+                * M=1 means that you don't want to integrate :)
+                  it is a degenerate case, but it will be handled correctly.
+                * M<1 means error
+    Eps     -   tolerance (absolute/relative error on each step will be
+                less than Eps). When passing:
+                * Eps>0, it means desired ABSOLUTE error
+                * Eps<0, it means desired RELATIVE error. Relative errors
+                  are calculated with respect to maximum values of Y seen
+                  so far. Be careful to use this criterion when starting
+                  from Y[] that are close to zero.
+    H       -   initial step length, it will be adjusted automatically
+                after the first step. If H=0, step will be selected
+                automatically (usually it will be equal to 0.001 of
+                min(x[i]-x[j])).

-NOTE: It is expected that Network is the same one which was passed to
-      MLPStartTraining() function. However, THIS function checks only
-      following:
-      * that number of network inputs is consistent with trainer object
-        settings
-      * that number of network outputs/classes is consistent with trainer
-        object settings
-      * that number of network weights is the same as number of weights in
-        the network passed to MLPStartTraining() function
-      Exception is thrown when these conditions are violated.

+OUTPUT PARAMETERS
+    State   -   structure which stores algorithm state between subsequent
+                calls of OdeSolverIteration. Used for reverse communication.
+                This structure should be passed to the OdeSolverIteration
+                subroutine.

-      It is also expected that you do not change state of the network on
-      your own - the only party who has right to change network during its
-      training is a trainer object. Any attempt to interfere with trainer
-      may lead to unpredictable results.

+SEE ALSO
+    AutoGKSmoothW, AutoGKSingular, AutoGKIteration, AutoGKResults.

  -- ALGLIB --
-     Copyright 23.07.2012 by Bochkanov Sergey
+     Copyright 01.09.2009 by Bochkanov Sergey
*************************************************************************/
-
    bool alglib::mlpcontinuetraining( - mlptrainer s, - multilayerperceptron network); -bool alglib::smp_mlpcontinuetraining( - mlptrainer s, - multilayerperceptron network); +
    void alglib::odesolverrkck( + real_1d_array y, + real_1d_array x, + double eps, + double h, + odesolverstate& state, + const xparams _params = alglib::xdefault); +void alglib::odesolverrkck( + real_1d_array y, + ae_int_t n, + real_1d_array x, + ae_int_t m, + double eps, + double h, + odesolverstate& state, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
/*************************************************************************
-Creation of the network trainer object for regression networks
+This function is used to launch iterations of the ODE solver

-INPUT PARAMETERS:
-    NIn         -   number of inputs, NIn>=1
-    NOut        -   number of outputs, NOut>=1
+It accepts the following parameters:
+    diff    -   callback which calculates dy/dx for given y and x
+    ptr     -   optional pointer which is passed to diff; can be NULL

-OUTPUT PARAMETERS:
-    S           -   neural network trainer object.
-                    This structure can be used to train any regression
-                    network with NIn inputs and NOut outputs.

  -- ALGLIB --
-     Copyright 23.07.2012 by Bochkanov Sergey
+     Copyright 01.09.2009 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::mlpcreatetrainer(ae_int_t nin, ae_int_t nout, mlptrainer& s); - +
    void odesolversolve(odesolverstate &state, + void (*diff)(const real_1d_array &y, double x, real_1d_array &dy, void *ptr), + void *ptr = NULL, const xparams _xparams = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  [6]  

    - +

    Examples:   [1]  

    + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "diffequations.h"
    +
    +using namespace alglib;
    +void ode_function_1_diff(const real_1d_array &y, double x, real_1d_array &dy, void *ptr) 
    +{
    +    // this callback calculates f(y[],x)=-y[0]
    +    dy[0] = -y[0];
    +}
    +
    +int main(int argc, char **argv)
    +{
    +    real_1d_array y = "[1]";
    +    real_1d_array x = "[0, 1, 2, 3]";
    +    double eps = 0.00001;
    +    double h = 0;
    +    odesolverstate s;
    +    ae_int_t m;
    +    real_1d_array xtbl;
    +    real_2d_array ytbl;
    +    odesolverreport rep;
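+    // build the RKCK solver state, run its iterations with the callback above,
+    // then read back the tabulated solution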
    +    odesolverrkck(y, x, eps, h, s);
    +    alglib::odesolversolve(s, ode_function_1_diff);
    +    odesolverresults(s, m, xtbl, ytbl, rep);
    +    printf("%d\n", int(m)); // EXPECTED: 4
    +    printf("%s\n", xtbl.tostring(2).c_str()); // EXPECTED: [0, 1, 2, 3]
    +    printf("%s\n", ytbl.tostring(2).c_str()); // EXPECTED: [[1], [0.367], [0.135], [0.050]]
    +    return 0;
    +}
    +
    +
    +
    + +
     
    /************************************************************************* -Creation of the network trainer object for classification networks +This structure is used for detailed reporting about suspected C0 +continuity violation. -INPUT PARAMETERS: - NIn - number of inputs, NIn>=1 - NClasses - number of classes, NClasses>=2 +=== WHAT IS TESTED ======================================================= -OUTPUT PARAMETERS: - S - neural network trainer object. - This structure can be used to train any classification - network with NIn inputs and NOut outputs. +C0 test studies function values (not gradient!) obtained during line +searches and monitors estimate of the Lipschitz constant. Sudden spikes +usually indicate that discontinuity was detected. - -- ALGLIB -- - Copyright 23.07.2012 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpcreatetrainercls( - ae_int_t nin, - ae_int_t nclasses, - mlptrainer& s); -
    -

    Examples:   [1]  [2]  

    - -
    -
    /************************************************************************* -Training neural networks ensemble using bootstrap aggregating (bagging). -L-BFGS algorithm is used as base training method. +=== WHAT IS REPORTED ===================================================== + +Actually, report retrieval function returns TWO report structures: + +* one for most suspicious point found so far (one with highest change in + the function value), so called "strongest" report +* another one for most detailed line search (more function evaluations = + easier to understand what's going on) which triggered test #0 criteria, + so called "longest" report + +In both cases following fields are returned: -INPUT PARAMETERS: - Ensemble - model with initialized geometry - XY - training set - NPoints - training set size - Decay - weight decay coefficient, >=0.001 - Restarts - restarts, >0. - WStep - stopping criterion, same as in MLPTrainLBFGS - MaxIts - stopping criterion, same as in MLPTrainLBFGS +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* fidx - is an index of the function (0 for target function, 1 or higher + for nonlinear constraints) which is suspected of being "non-C1" +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], f[] - arrays of length CNT which store step lengths and function + values at these points; f[i] is evaluated in x0+stp[i]*d. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. -OUTPUT PARAMETERS: - Ensemble - trained model - Info - return code: - * -8, if both WStep=0 and MaxIts=0 - * -2, if there is a point with class number - outside of [0..NClasses-1]. - * -1, if incorrect parameters was passed - (NPoints<0, Restarts<1). - * 2, if task has been solved. - Rep - training report. - OOBErrors - out-of-bag generalization error estimate +You can plot function values stored in stp[] and f[] arrays and study +behavior of your function by your own eyes, just to be sure that test +correctly reported C1 violation. -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey + Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpebagginglbfgs( - mlpensemble ensemble, - real_2d_array xy, - ae_int_t npoints, - double decay, - ae_int_t restarts, - double wstep, - ae_int_t maxits, - ae_int_t& info, - mlpreport& rep, - mlpcvreport& ooberrors); +
    class optguardnonc0report +{ + bool positive; + ae_int_t fidx; + real_1d_array x0; + real_1d_array d; + ae_int_t n; + real_1d_array stp; + real_1d_array f; + ae_int_t cnt; + ae_int_t stpidxa; + ae_int_t stpidxb; +};
    - +
     
    /************************************************************************* -Training neural networks ensemble using bootstrap aggregating (bagging). -Modified Levenberg-Marquardt algorithm is used as base training method. +This structure is used for detailed reporting about suspected C1 +continuity violation as flagged by C1 test #0 (OptGuard has several tests +for C1 continuity, this report is used by #0). -INPUT PARAMETERS: - Ensemble - model with initialized geometry - XY - training set - NPoints - training set size - Decay - weight decay coefficient, >=0.001 - Restarts - restarts, >0. +=== WHAT IS TESTED ======================================================= -OUTPUT PARAMETERS: - Ensemble - trained model - Info - return code: - * -2, if there is a point with class number - outside of [0..NClasses-1]. - * -1, if incorrect parameters was passed - (NPoints<0, Restarts<1). - * 2, if task has been solved. - Rep - training report. - OOBErrors - out-of-bag generalization error estimate +C1 test #0 studies function values (not gradient!) obtained during line +searches and monitors behavior of directional derivative estimate. This +test is less powerful than test #1, but it does not depend on gradient +values and thus it is more robust against artifacts introduced by +numerical differentiation. + + +=== WHAT IS REPORTED ===================================================== + +Actually, report retrieval function returns TWO report structures: + +* one for most suspicious point found so far (one with highest change in + the directional derivative), so called "strongest" report +* another one for most detailed line search (more function evaluations = + easier to understand what's going on) which triggered test #0 criteria, + so called "longest" report + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* fidx - is an index of the function (0 for target function, 1 or higher + for nonlinear constraints) which is suspected of being "non-C1" +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], f[] - arrays of length CNT which store step lengths and function + values at these points; f[i] is evaluated in x0+stp[i]*d. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +You can plot function values stored in stp[] and f[] arrays and study +behavior of your function by your own eyes, just to be sure that test +correctly reported C1 violation. -- ALGLIB -- - Copyright 17.02.2009 by Bochkanov Sergey + Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpebagginglm( - mlpensemble ensemble, - real_2d_array xy, - ae_int_t npoints, - double decay, - ae_int_t restarts, - ae_int_t& info, - mlpreport& rep, - mlpcvreport& ooberrors); +
    class optguardnonc1test0report +{ + bool positive; + ae_int_t fidx; + real_1d_array x0; + real_1d_array d; + ae_int_t n; + real_1d_array stp; + real_1d_array f; + ae_int_t cnt; + ae_int_t stpidxa; + ae_int_t stpidxb; +};
    - +
     
    /************************************************************************* -Training neural networks ensemble using early stopping. +This structure is used for detailed reporting about suspected C1 +continuity violation as flagged by C1 test #1 (OptGuard has several tests +for C1 continuity, this report is used by #1). -INPUT PARAMETERS: - Ensemble - model with initialized geometry - XY - training set - NPoints - training set size - Decay - weight decay coefficient, >=0.001 - Restarts - restarts, >0. +=== WHAT IS TESTED ======================================================= -OUTPUT PARAMETERS: - Ensemble - trained model - Info - return code: - * -2, if there is a point with class number - outside of [0..NClasses-1]. - * -1, if incorrect parameters was passed - (NPoints<0, Restarts<1). - * 6, if task has been solved. - Rep - training report. - OOBErrors - out-of-bag generalization error estimate +C1 test #1 studies individual components of the gradient as recorded +during line searches. Upon discovering discontinuity in the gradient this +test records specific component which was suspected (or one with highest +indication of discontinuity if multiple components are suspected). + +When precise analytic gradient is provided this test is more powerful than +test #0 which works with function values and ignores user-provided +gradient. However, test #0 becomes more powerful when numerical +differentiation is employed (in such cases test #1 detects higher levels +of numerical noise and becomes too conservative). + +This test also tells specific components of the gradient which violate C1 +continuity, which makes it more informative than #0, which just tells that +continuity is violated. + + +=== WHAT IS REPORTED ===================================================== + +Actually, report retrieval function returns TWO report structures: + +* one for most suspicious point found so far (one with highest change in + the directional derivative), so called "strongest" report +* another one for most detailed line search (more function evaluations = + easier to understand what's going on) which triggered test #1 criteria, + so called "longest" report + +In both cases following fields are returned: + +* positive - is TRUE when test flagged suspicious point; FALSE if test + did not notice anything (in the latter cases fields below are empty). +* fidx - is an index of the function (0 for target function, 1 or higher + for nonlinear constraints) which is suspected of being "non-C1" +* vidx - is an index of the variable in [0,N) with nonsmooth derivative +* x0[], d[] - arrays of length N which store initial point and direction + for line search (d[] can be normalized, but does not have to) +* stp[], g[] - arrays of length CNT which store step lengths and gradient + values at these points; g[i] is evaluated in x0+stp[i]*d and contains + vidx-th component of the gradient. +* stpidxa, stpidxb - we suspect that function violates C1 continuity + between steps #stpidxa and #stpidxb (usually we have stpidxb=stpidxa+3, + with most likely position of the violation between stpidxa+1 and + stpidxa+2. + +You can plot function values stored in stp[] and g[] arrays and study +behavior of your function by your own eyes, just to be sure that test +correctly reported C1 violation. -- ALGLIB -- - Copyright 10.03.2009 by Bochkanov Sergey + Copyright 19.11.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpetraines( - mlpensemble ensemble, - real_2d_array xy, - ae_int_t npoints, - double decay, - ae_int_t restarts, - ae_int_t& info, - mlpreport& rep); +
    class optguardnonc1test1report +{ + bool positive; + ae_int_t fidx; + ae_int_t vidx; + real_1d_array x0; + real_1d_array d; + ae_int_t n; + real_1d_array stp; + real_1d_array g; + ae_int_t cnt; + ae_int_t stpidxa; + ae_int_t stpidxb; +};
    - +
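A sketch of how such a report might be inspected once it has been retrieved from an optimizer; only the fields documented above are used, and the helper name is a placeholder, as is the optimization.h header name.

#include <cstdio>
#include "optimization.h"

// placeholder helper: dump the line search flagged by C1 test #1
void print_nonc1test1(const alglib::optguardnonc1test1report &rep)
{
    if( !rep.positive )
    {
        printf("C1 test #1 found nothing suspicious\n");
        return;
    }
    printf("function %d, variable %d looks non-smooth\n", int(rep.fidx), int(rep.vidx));
    for(int i=0; i<rep.cnt; i++)
        printf("  stp=%.6f  g=%.6f%s\n", double(rep.stp[i]), double(rep.g[i]),
               (i>=rep.stpidxa && i<=rep.stpidxb) ? "   <-- suspected interval" : "");
}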
     
/*************************************************************************
-This function estimates generalization error using cross-validation on the
-current dataset with current training settings.
+This structure is used to store OptGuard report, i.e. report on the
+properties of the nonlinear function being optimized with ALGLIB.
+
+After you tell your optimizer to activate OptGuard this technology starts
+to silently monitor function values and gradients/Jacobians being passed
+all around during your optimization session. Depending on specific set of
+checks enabled OptGuard may perform additional function evaluations (say,
+about 3*N evaluations if you want to check analytic gradient for errors).
+
+Upon discovering that something strange happens (function values and/or
+gradient components change too sharply and/or unexpectedly) OptGuard sets
+one of the "suspicion flags" (without interrupting optimization session).
+After optimization is done, you can examine OptGuard report.
+
+Following report fields can be set:
+* nonc0suspected
+* nonc1suspected
+* badgradsuspected
+
+
+=== WHAT CAN BE DETECTED WITH OptGuard INTEGRITY CHECKER =================
+
+Following types of errors in your target function (constraints) can be
+caught:
+a) discontinuous functions ("non-C0" part of the report)
+b) functions with discontinuous derivative ("non-C1" part of the report)
+c) errors in the analytic gradient provided by user
+
+These types of errors result in optimizer stopping well before reaching
+solution (most often - right after encountering discontinuity).
+
+Type A errors are usually coding errors during implementation of the
+target function. Most "normal" problems involve continuous functions, and
+anyway you can't reliably optimize discontinuous function.
+
+Type B errors are either coding errors or (in case code itself is correct)
+evidence of the fact that your problem is an "incorrect" one. Most
+optimizers (except for ones provided by MINNS subpackage) do not support
+nonsmooth problems.
+
+Type C errors are coding errors which often prevent optimizer from making
+even one step or result in the optimizer stopping too early, as soon as
+actual descent direction becomes too different from one suggested by user-
+supplied gradient.
+
+
+=== WHAT IS REPORTED =====================================================
+
+Following set of report fields deals with discontinuous target functions,
+ones not belonging to C0 continuity class:
+
+* nonc0suspected - is a flag which is set upon discovering some indication
+  of the discontinuity. If this flag is false, the rest of "non-C0" fields
+  should be ignored
+* nonc0fidx - is an index of the function (0 for target function, 1 or
+  higher for nonlinear constraints) which is suspected of being "non-C0"
+* nonc0lipschitzc - a Lipschitz constant for a function which was suspected
+  of being non-continuous.
+* nonc0test0positive - set to indicate specific test which detected
+  continuity violation (test #0)
+
+Following set of report fields deals with discontinuous gradient/Jacobian,
+i.e. with functions violating C1 continuity:
+
+* nonc1suspected - is a flag which is set upon discovering some indication
+  of the discontinuity. If this flag is false, the rest of "non-C1" fields
+  should be ignored
+* nonc1fidx - is an index of the function (0 for target function, 1 or
+  higher for nonlinear constraints) which is suspected of being "non-C1"
+* nonc1lipschitzc - a Lipschitz constant for a function gradient which was
+  suspected of being non-smooth.
+* nonc1test0positive - set to indicate specific test which detected
+  continuity violation (test #0)
+* nonc1test1positive - set to indicate specific test which detected
+  continuity violation (test #1)
+
+Following set of report fields deals with errors in the gradient:
+* badgradsuspected - is a flag which is set upon discovering an error in
+  the analytic gradient supplied by user
+* badgradfidx - index of the function with bad gradient (0 for target
+  function, 1 or higher for nonlinear constraints)
+* badgradvidx - index of the variable
+* badgradxbase - location where Jacobian is tested
+* following matrices store user-supplied Jacobian and its numerical
+  differentiation version (which is assumed to be free from the coding
+  errors), both of them computed near the initial point:
+  * badgraduser, an array[K,N], analytic Jacobian supplied by user
+  * badgradnum, an array[K,N], numeric Jacobian computed by ALGLIB
+  Here K is the total number of nonlinear functions (target + nonlinear
+  constraints), N is the number of variables.
+  The element of badgraduser[] with index [badgradfidx,badgradvidx] is
+  assumed to be wrong.
+
+A more detailed error log can be obtained from the optimizer by explicitly
+requesting reports for tests C0.0, C1.0, C1.1.
+
+ -- ALGLIB --
+   Copyright 19.11.2018 by Bochkanov Sergey
+*************************************************************************/
+
    class optguardreport +{ + bool nonc0suspected; + bool nonc0test0positive; + ae_int_t nonc0fidx; + double nonc0lipschitzc; + bool nonc1suspected; + bool nonc1test0positive; + bool nonc1test1positive; + ae_int_t nonc1fidx; + double nonc1lipschitzc; + bool badgradsuspected; + ae_int_t badgradfidx; + ae_int_t badgradvidx; + real_1d_array badgradxbase; + real_2d_array badgraduser; + real_2d_array badgradnum; +}; -FOR USERS OF COMMERCIAL EDITION: +
    + + + +
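The report above is normally obtained from one of the optimizers. The sketch below shows one way this might look with the minlbfgs subpackage; the minlbfgsoptguardgradient / minlbfgsoptguardsmoothness / minlbfgsoptguardresults hooks and the optimization.h header are not part of this excerpt and are assumed here, so treat the wiring as illustrative rather than authoritative.

#include <cstdio>
#include <cmath>
#include "optimization.h"

using namespace alglib;

// deliberately nonsmooth target f(x) = |x0| + x1^2, so OptGuard has something to flag
static void target_grad(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr)
{
    func    = fabs(x[0]) + x[1]*x[1];
    grad[0] = x[0]>=0 ? 1.0 : -1.0;
    grad[1] = 2*x[1];
}

int main()
{
    real_1d_array x = "[1.0,1.0]";
    minlbfgsstate state;
    minlbfgsreport rep;
    minlbfgscreate(1, x, state);
    minlbfgssetcond(state, 0, 0, 0, 20);

    // assumed OptGuard hooks: compare the analytic gradient against numerical
    // differentiation and monitor C0/C1 continuity during line searches
    minlbfgsoptguardgradient(state, 0.001);
    minlbfgsoptguardsmoothness(state);

    minlbfgsoptimize(state, target_grad);
    minlbfgsresults(state, x, rep);

    optguardreport ogrep;
    minlbfgsoptguardresults(state, ogrep);
    printf("C0 violation suspected: %s\n", ogrep.nonc0suspected   ? "yes" : "no");
    printf("C1 violation suspected: %s\n", ogrep.nonc1suspected   ? "yes" : "no");
    printf("bad gradient suspected: %s\n", ogrep.badgradsuspected ? "yes" : "no");
    return 0;
}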
    +
    /************************************************************************* +LQ decomposition of a rectangular complex matrix of size MxN - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support (C++ computational core) - ! - ! Second improvement gives constant speedup (2-3X). First improvement - ! gives close-to-linear speedup on multicore systems. Following - ! operations can be executed in parallel: - ! * FoldsCount cross-validation rounds (always) - ! * NRestarts training sessions performed within each of - ! cross-validation rounds (if NRestarts>1) - ! * gradient calculation over large dataset (if dataset is large enough) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -INPUT PARAMETERS: - S - trainer object - Network - neural network. It must have same number of inputs and - output/classes as was specified during creation of the - trainer object. Network is not changed during cross- - validation and is not trained - it is used only as - representative of its architecture. I.e., we estimate - generalization properties of ARCHITECTURE, not some - specific network. - NRestarts - number of restarts, >=0: - * NRestarts>0 means that for each cross-validation - round specified number of random restarts is - performed, with best network being chosen after - training. - * NRestarts=0 is same as NRestarts=1 - FoldsCount - number of folds in k-fold cross-validation: - * 2<=FoldsCount<=size of dataset - * recommended value: 10. - * values larger than dataset size will be silently - truncated down to dataset size +Input parameters: + A - matrix A whose indexes range within [0..M-1, 0..N-1] + M - number of rows in matrix A. + N - number of columns in matrix A. -OUTPUT PARAMETERS: - Rep - structure which contains cross-validation estimates: - * Rep.RelCLSError - fraction of misclassified cases. - * Rep.AvgCE - acerage cross-entropy - * Rep.RMSError - root-mean-square error - * Rep.AvgError - average error - * Rep.AvgRelError - average relative error +Output parameters: + A - matrices Q and L in compact form + Tau - array of scalar factors which are used to form matrix Q. Array + whose indexes range within [0.. 
Min(M,N)-1] -NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(), - or subset with only one point was given, zeros are returned as - estimates. +Matrix A is represented as A = LQ, where Q is an orthogonal matrix of size +MxM, L - lower triangular (or lower trapezoid) matrix of size MxN. -NOTE: this method performs FoldsCount cross-validation rounds, each one - with NRestarts random starts. Thus, FoldsCount*NRestarts networks - are trained in total. + -- LAPACK routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + September 30, 1994 +*************************************************************************/ +
    void alglib::cmatrixlq( + complex_2d_array& a, + ae_int_t m, + ae_int_t n, + complex_1d_array& tau, + const xparams _params = alglib::xdefault); -NOTE: Rep.RelCLSError/Rep.AvgCE are zero on regression problems. +
    + +
    +
    /************************************************************************* +Unpacking of matrix L from the LQ decomposition of a matrix A -NOTE: on classification problems Rep.RMSError/Rep.AvgError/Rep.AvgRelError - contain errors in prediction of posterior probabilities. +Input parameters: + A - matrices Q and L in compact form. + Output of CMatrixLQ subroutine. + M - number of rows in given matrix A. M>=0. + N - number of columns in given matrix A. N>=0. - -- ALGLIB -- - Copyright 23.07.2012 by Bochkanov Sergey +Output parameters: + L - matrix L, array[0..M-1, 0..N-1]. + + -- ALGLIB routine -- + 17.02.2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpkfoldcv( - mlptrainer s, - multilayerperceptron network, - ae_int_t nrestarts, - ae_int_t foldscount, - mlpreport& rep); -void alglib::smp_mlpkfoldcv( - mlptrainer s, - multilayerperceptron network, - ae_int_t nrestarts, - ae_int_t foldscount, - mlpreport& rep); +
    void alglib::cmatrixlqunpackl( + complex_2d_array a, + ae_int_t m, + ae_int_t n, + complex_2d_array& l, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - +
     
/*************************************************************************
-Cross-validation estimate of generalization error.
+Partial unpacking of matrix Q from LQ decomposition of a complex matrix A.

-Base algorithm - L-BFGS.
+  ! COMMERCIAL EDITION OF ALGLIB:
+  !
+  ! Commercial Edition of ALGLIB includes following important improvements
+  ! of this function:
+  ! * high-performance native backend with same C# interface (C# version)
+  ! * multithreading support (C++ and C# versions)
+  ! * hardware vendor (Intel) implementations of linear algebra primitives
+  !   (C++ and C# versions, x86/x64 platform)
+  !
+  ! We recommend you to read 'Working with commercial version' section of
+  ! ALGLIB Reference Manual in order to find out how to use performance-
+  ! related features provided by commercial edition of ALGLIB.

-INPUT PARAMETERS:
-    Network     -   neural network with initialized geometry. Network is
-                    not changed during cross-validation - it is used only
-                    as a representative of its architecture.
-    XY          -   training set.
-    SSize       -   training set size
-    Decay       -   weight decay, same as in MLPTrainLBFGS
-    Restarts    -   number of restarts, >0.
-                    restarts are counted for each partition separately, so
-                    total number of restarts will be Restarts*FoldsCount.
-    WStep       -   stopping criterion, same as in MLPTrainLBFGS
-    MaxIts      -   stopping criterion, same as in MLPTrainLBFGS
-    FoldsCount  -   number of folds in k-fold cross-validation,
-                    2<=FoldsCount<=SSize.
-                    recommended value: 10.
+Input parameters:
+    A           -   matrices Q and L in compact form.
+                    Output of CMatrixLQ subroutine.
+    M           -   number of rows in matrix A. M>=0.
+    N           -   number of columns in matrix A. N>=0.
+    Tau         -   scalar factors which are used to form Q.
+                    Output of CMatrixLQ subroutine.
+    QRows       -   required number of rows in matrix Q. N>=QRows>=0.

-OUTPUT PARAMETERS:
-    Info        -   return code, same as in MLPTrainLBFGS
-    Rep         -   report, same as in MLPTrainLM/MLPTrainLBFGS
-    CVRep       -   generalization error estimates
+Output parameters:
+    Q           -   first QRows rows of matrix Q.
+                    Array whose index ranges within [0..QRows-1, 0..N-1].
+                    If QRows=0, array isn't changed.

-  -- ALGLIB --
-     Copyright 09.12.2007 by Bochkanov Sergey
+  -- ALGLIB routine --
+     17.02.2010
+     Bochkanov Sergey
*************************************************************************/
-
    void alglib::mlpkfoldcvlbfgs( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints, - double decay, - ae_int_t restarts, - double wstep, - ae_int_t maxits, - ae_int_t foldscount, - ae_int_t& info, - mlpreport& rep, - mlpcvreport& cvrep); +
    void alglib::cmatrixlqunpackq( + complex_2d_array a, + ae_int_t m, + ae_int_t n, + complex_1d_array tau, + ae_int_t qrows, + complex_2d_array& q, + const xparams _params = alglib::xdefault);
    - +
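A sketch tying the three LQ routines together (linalg.h header name assumed): factor a small complex matrix, then unpack L and the full Q.

#include <cstdio>
#include "linalg.h"

using namespace alglib;

int main()
{
    // arbitrary 3x3 complex matrix
    complex_2d_array a;
    a.setlength(3, 3);
    for(int i=0; i<3; i++)
        for(int j=0; j<3; j++)
            a[i][j] = alglib::complex(i+1, j-i);

    complex_1d_array tau;
    cmatrixlq(a, 3, 3, tau);                // A now holds L and Q in compact form

    complex_2d_array l, q;
    cmatrixlqunpackl(a, 3, 3, l);           // lower triangular L
    cmatrixlqunpackq(a, 3, 3, tau, 3, q);   // all 3 rows of Q
    printf("L is %dx%d, Q is %dx%d\n",
           int(l.rows()), int(l.cols()), int(q.rows()), int(q.cols()));
    return 0;
}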
     
    /************************************************************************* -Cross-validation estimate of generalization error. +QR decomposition of a rectangular complex matrix of size MxN -Base algorithm - Levenberg-Marquardt. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -INPUT PARAMETERS: - Network - neural network with initialized geometry. Network is - not changed during cross-validation - it is used only - as a representative of its architecture. - XY - training set. - SSize - training set size - Decay - weight decay, same as in MLPTrainLBFGS - Restarts - number of restarts, >0. - restarts are counted for each partition separately, so - total number of restarts will be Restarts*FoldsCount. - FoldsCount - number of folds in k-fold cross-validation, - 2<=FoldsCount<=SSize. - recommended value: 10. +Input parameters: + A - matrix A whose indexes range within [0..M-1, 0..N-1] + M - number of rows in matrix A. + N - number of columns in matrix A. -OUTPUT PARAMETERS: - Info - return code, same as in MLPTrainLBFGS - Rep - report, same as in MLPTrainLM/MLPTrainLBFGS - CVRep - generalization error estimates +Output parameters: + A - matrices Q and R in compact form + Tau - array of scalar factors which are used to form matrix Q. Array + whose indexes range within [0.. Min(M,N)-1] - -- ALGLIB -- - Copyright 09.12.2007 by Bochkanov Sergey +Matrix A is represented as A = QR, where Q is an orthogonal matrix of size +MxM, R - upper triangular (or upper trapezoid) matrix of size MxN. + + -- LAPACK routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + September 30, 1994 *************************************************************************/ -
    void alglib::mlpkfoldcvlm( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints, - double decay, - ae_int_t restarts, - ae_int_t foldscount, - ae_int_t& info, - mlpreport& rep, - mlpcvreport& cvrep); +
    void alglib::cmatrixqr( + complex_2d_array& a, + ae_int_t m, + ae_int_t n, + complex_1d_array& tau, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets training algorithm: batch training using L-BFGS will be -used. +Partial unpacking of matrix Q from QR decomposition of a complex matrix A. -This algorithm: -* the most robust for small-scale problems, but may be too slow for large - scale ones. -* perfoms full pass through the dataset before performing step -* uses conditions specified by MLPSetCond() for stopping -* is default one used by trainer object + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -INPUT PARAMETERS: - S - trainer object +Input parameters: + A - matrices Q and R in compact form. + Output of CMatrixQR subroutine . + M - number of rows in matrix A. M>=0. + N - number of columns in matrix A. N>=0. + Tau - scalar factors which are used to form Q. + Output of CMatrixQR subroutine . + QColumns - required number of columns in matrix Q. M>=QColumns>=0. - -- ALGLIB -- - Copyright 23.07.2012 by Bochkanov Sergey +Output parameters: + Q - first QColumns columns of matrix Q. + Array whose index ranges within [0..M-1, 0..QColumns-1]. + If QColumns=0, array isn't changed. + + -- ALGLIB routine -- + 17.02.2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpsetalgobatch(mlptrainer s); +
    void alglib::cmatrixqrunpackq( + complex_2d_array a, + ae_int_t m, + ae_int_t n, + complex_1d_array tau, + ae_int_t qcolumns, + complex_2d_array& q, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function sets stopping criteria for the optimizer. - -INPUT PARAMETERS: - S - trainer object - WStep - stopping criterion. Algorithm stops if step size is - less than WStep. Recommended value - 0.01. Zero step - size means stopping after MaxIts iterations. - WStep>=0. - MaxIts - stopping criterion. Algorithm stops after MaxIts - epochs (full passes over entire dataset). Zero MaxIts - means stopping when step is sufficiently small. - MaxIts>=0. +Unpacking of matrix R from the QR decomposition of a matrix A -NOTE: by default, WStep=0.005 and MaxIts=0 are used. These values are also - used when MLPSetCond() is called with WStep=0 and MaxIts=0. +Input parameters: + A - matrices Q and R in compact form. + Output of CMatrixQR subroutine. + M - number of rows in given matrix A. M>=0. + N - number of columns in given matrix A. N>=0. -NOTE: these stopping criteria are used for all kinds of neural training - - from "conventional" networks to early stopping ensembles. When used - for "conventional" networks, they are used as the only stopping - criteria. When combined with early stopping, they used as ADDITIONAL - stopping criteria which can terminate early stopping algorithm. +Output parameters: + R - matrix R, array[0..M-1, 0..N-1]. - -- ALGLIB -- - Copyright 23.07.2012 by Bochkanov Sergey + -- ALGLIB routine -- + 17.02.2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpsetcond(mlptrainer s, double wstep, ae_int_t maxits); +
    void alglib::cmatrixqrunpackr( + complex_2d_array a, + ae_int_t m, + ae_int_t n, + complex_2d_array& r, + const xparams _params = alglib::xdefault);
    - +
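The same pattern for the QR routines above, again assuming the linalg.h header: factor a rectangular complex matrix and unpack Q and R.

#include <cstdio>
#include "linalg.h"

using namespace alglib;

int main()
{
    // arbitrary 4x3 complex matrix
    complex_2d_array a;
    a.setlength(4, 3);
    for(int i=0; i<4; i++)
        for(int j=0; j<3; j++)
            a[i][j] = alglib::complex(i+j+1, i-j);

    complex_1d_array tau;
    cmatrixqr(a, 4, 3, tau);                 // A now holds Q and R in compact form

    complex_2d_array q, r;
    cmatrixqrunpackq(a, 4, 3, tau, 4, q);    // all 4 columns of Q
    cmatrixqrunpackr(a, 4, 3, r);            // upper trapezoidal R
    printf("Q is %dx%d, R is %dx%d\n",
           int(q.rows()), int(q.cols()), int(r.rows()), int(r.cols()));
    return 0;
}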
     
    /************************************************************************* -This function sets "current dataset" of the trainer object to one passed -by user. +Reduction of a Hermitian matrix which is given by its higher or lower +triangular part to a real tridiagonal matrix using unitary similarity +transformation: Q'*A*Q = T. -INPUT PARAMETERS: - S - trainer object - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. - NPoints - points count, >=0. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +Input parameters: + A - matrix to be transformed + array with elements [0..N-1, 0..N-1]. + N - size of matrix A. + IsUpper - storage format. If IsUpper = True, then matrix A is given + by its upper triangle, and the lower triangle is not used + and not modified by the algorithm, and vice versa + if IsUpper = False. + +Output parameters: + A - matrices T and Q in compact form (see lower) + Tau - array of factors which are forming matrices H(i) + array with elements [0..N-2]. + D - main diagonal of real symmetric matrix T. + array with elements [0..N-1]. + E - secondary diagonal of real symmetric matrix T. + array with elements [0..N-2]. + + + If IsUpper=True, the matrix Q is represented as a product of elementary + reflectors + + Q = H(n-2) . . . H(2) H(0). + + Each H(i) has the form + + H(i) = I - tau * v * v' + + where tau is a complex scalar, and v is a complex vector with + v(i+1:n-1) = 0, v(i) = 1, v(0:i-1) is stored on exit in + A(0:i-1,i+1), and tau in TAU(i). + + If IsUpper=False, the matrix Q is represented as a product of elementary + reflectors + + Q = H(0) H(2) . . . H(n-2). + + Each H(i) has the form + + H(i) = I - tau * v * v' + + where tau is a complex scalar, and v is a complex vector with + v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) is stored on exit in A(i+2:n-1,i), + and tau in TAU(i). -DATASET FORMAT: + The contents of A on exit are illustrated by the following examples + with n = 5: -This function uses two different dataset formats - one for regression -networks, another one for classification networks. + if UPLO = 'U': if UPLO = 'L': -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs + ( d e v1 v2 v3 ) ( d ) + ( d e v2 v3 ) ( e d ) + ( d e v3 ) ( v0 e d ) + ( d e ) ( v0 v1 e d ) + ( d ) ( v0 v1 v2 e d ) -For classification networks with NIn inputs and NClasses clases following -datasetformat is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +where d and e denote diagonal and off-diagonal elements of T, and vi +denotes an element of the vector defining H(i). 
- -- ALGLIB -- - Copyright 23.07.2012 by Bochkanov Sergey + -- LAPACK routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + October 31, 1992 *************************************************************************/ -
    void alglib::mlpsetdataset( - mlptrainer s, - real_2d_array xy, - ae_int_t npoints); +
    void alglib::hmatrixtd( + complex_2d_array& a, + ae_int_t n, + bool isupper, + complex_1d_array& tau, + real_1d_array& d, + real_1d_array& e, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  [6]  [7]  [8]  

    - +
     
    /************************************************************************* -This function sets weight decay coefficient which is used for training. +Unpacking matrix Q which reduces a Hermitian matrix to a real tridiagonal +form. -INPUT PARAMETERS: - S - trainer object - Decay - weight decay coefficient, >=0. Weight decay term - 'Decay*||Weights||^2' is added to error function. If - you don't know what Decay to choose, use 1.0E-3. - Weight decay can be set to zero, in this case network - is trained without weight decay. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -NOTE: by default network uses some small nonzero value for weight decay. +Input parameters: + A - the result of a HMatrixTD subroutine + N - size of matrix A. + IsUpper - storage format (a parameter of HMatrixTD subroutine) + Tau - the result of a HMatrixTD subroutine + +Output parameters: + Q - transformation matrix. + array with elements [0..N-1, 0..N-1]. -- ALGLIB -- - Copyright 23.07.2012 by Bochkanov Sergey + Copyright 2005-2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpsetdecay(mlptrainer s, double decay); +
    void alglib::hmatrixtdunpackq( + complex_2d_array a, + ae_int_t n, + bool isupper, + complex_1d_array tau, + complex_2d_array& q, + const xparams _params = alglib::xdefault);
    - +
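A sketch of the tridiagonal reduction pair above (linalg.h header assumed): reduce a small Hermitian matrix given by its upper triangle, then unpack Q.

#include <cstdio>
#include "linalg.h"

using namespace alglib;

int main()
{
    // 3x3 Hermitian matrix; both triangles are filled, but isupper=true below
    // means only the upper one is actually read
    complex_2d_array a;
    a.setlength(3, 3);
    a[0][0] = 2.0;                     a[0][1] = alglib::complex(1, 1);  a[0][2] = alglib::complex(0, 2);
    a[1][0] = alglib::complex(1, -1);  a[1][1] = 3.0;                    a[1][2] = alglib::complex(1, -1);
    a[2][0] = alglib::complex(0, -2);  a[2][1] = alglib::complex(1, 1);  a[2][2] = 4.0;

    complex_1d_array tau;
    real_1d_array d, e;
    hmatrixtd(a, 3, true, tau, d, e);        // Q'*A*Q = T, T real tridiagonal

    complex_2d_array q;
    hmatrixtdunpackq(a, 3, true, tau, q);    // recover the unitary Q
    printf("diag:    %s\n", d.tostring(3).c_str());
    printf("offdiag: %s\n", e.tostring(3).c_str());
    return 0;
}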
     
    /************************************************************************* -This function sets "current dataset" of the trainer object to one passed -by user (sparse matrix is used to store dataset). +Reduction of a rectangular matrix to bidiagonal form -INPUT PARAMETERS: - S - trainer object - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. Any sparse storage format can be used: - Hash-table, CRS... - NPoints - points count, >=0 +The algorithm reduces the rectangular matrix A to bidiagonal form by +orthogonal transformations P and Q: A = Q*B*(P^T). -DATASET FORMAT: + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -This function uses two different dataset formats - one for regression -networks, another one for classification networks. +Input parameters: + A - source matrix. array[0..M-1, 0..N-1] + M - number of rows in matrix A. + N - number of columns in matrix A. -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +Output parameters: + A - matrices Q, B, P in compact form (see below). + TauQ - scalar factors which are used to form matrix Q. + TauP - scalar factors which are used to form matrix P. -For classification networks with NIn inputs and NClasses clases following -datasetformat is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +The main diagonal and one of the secondary diagonals of matrix A are +replaced with bidiagonal matrix B. Other elements contain elementary +reflections which form MxM matrix Q and NxN matrix P, respectively. - -- ALGLIB -- - Copyright 23.07.2012 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::mlpsetsparsedataset( - mlptrainer s, - sparsematrix xy, - ae_int_t npoints); +If M>=N, B is the upper bidiagonal MxN matrix and is stored in the +corresponding elements of matrix A. Matrix Q is represented as a +product of elementary reflections Q = H(0)*H(1)*...*H(n-1), where +H(i) = 1-tau*v*v'. Here tau is a scalar which is stored in TauQ[i], and +vector v has the following structure: v(0:i-1)=0, v(i)=1, v(i+1:m-1) is +stored in elements A(i+1:m-1,i). Matrix P is as follows: P = +G(0)*G(1)*...*G(n-2), where G(i) = 1 - tau*u*u'. Tau is stored in TauP[i], +u(0:i)=0, u(i+1)=1, u(i+2:n-1) is stored in elements A(i,i+2:n-1). -
    - -
    -
    /************************************************************************* -IMPORTANT: this is an "expert" version of the MLPTrain() function. We do - not recommend you to use it unless you are pretty sure that you - need ability to monitor training progress. +If M<N, B is the lower bidiagonal MxN matrix and is stored in the +corresponding elements of matrix A. Q = H(0)*H(1)*...*H(m-2), where +H(i) = 1 - tau*v*v', tau is stored in TauQ, v(0:i)=0, v(i+1)=1, v(i+2:m-1) +is stored in elements A(i+2:m-1,i). P = G(0)*G(1)*...*G(m-1), +G(i) = 1-tau*u*u', tau is stored in TauP, u(0:i-1)=0, u(i)=1, u(i+1:n-1) +is stored in A(i,i+1:n-1). -This function performs step-by-step training of the neural network. Here -"step-by-step" means that training starts with MLPStartTraining() call, -and then user subsequently calls MLPContinueTraining() to perform one more -iteration of the training. +EXAMPLE: -After call to this function trainer object remembers network and is ready -to train it. However, no training is performed until first call to -MLPContinueTraining() function. Subsequent calls to MLPContinueTraining() -will advance training progress one iteration further. +m=6, n=5 (m > n): m=5, n=6 (m < n): -EXAMPLE: - > - > ...initialize network and trainer object.... - > - > MLPStartTraining(Trainer, Network, True) - > while MLPContinueTraining(Trainer, Network) do - > ...visualize training progress... - > +( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) +( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) +( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) +( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) +( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) +( v1 v2 v3 v4 v5 ) -INPUT PARAMETERS: - S - trainer object - Network - neural network. It must have same number of inputs and - output/classes as was specified during creation of the - trainer object. - RandomStart - randomize network before training or not: - * True means that network is randomized and its - initial state (one which was passed to the trainer - object) is lost. - * False means that training is started from the - current state of the network +Here vi and ui are vectors which form H(i) and G(i), and d and e - +are the diagonal and off-diagonal elements of matrix B. -OUTPUT PARAMETERS: - Network - neural network which is ready to training (weights are - initialized, preprocessor is initialized using current - training set) + -- LAPACK routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + September 30, 1994. + Sergey Bochkanov, ALGLIB project, translation from FORTRAN to + pseudocode, 2007-2010. +*************************************************************************/ +
    void alglib::rmatrixbd( + real_2d_array& a, + ae_int_t m, + ae_int_t n, + real_1d_array& tauq, + real_1d_array& taup, + const xparams _params = alglib::xdefault); -NOTE: this method uses sum-of-squares error function for training. +
    + +
    +
    /************************************************************************* +Multiplication by matrix P which reduces matrix A to bidiagonal form. -NOTE: it is expected that trainer object settings are NOT changed during - step-by-step training, i.e. no one changes stopping criteria or - training set during training. It is possible and there is no defense - against such actions, but algorithm behavior in such cases is - undefined and can be unpredictable. +The algorithm allows pre- or post-multiply by P or P'. + +Input parameters: + QP - matrices Q and P in compact form. + Output of RMatrixBD subroutine. + M - number of rows in matrix A. + N - number of columns in matrix A. + TAUP - scalar factors which are used to form P. + Output of RMatrixBD subroutine. + Z - multiplied matrix. + Array whose indexes range within [0..ZRows-1,0..ZColumns-1]. + ZRows - number of rows in matrix Z. If FromTheRight=False, + ZRows=N, otherwise ZRows can be arbitrary. + ZColumns - number of columns in matrix Z. If FromTheRight=True, + ZColumns=N, otherwise ZColumns can be arbitrary. + FromTheRight - pre- or post-multiply. + DoTranspose - multiply by P or P'. + +Output parameters: + Z - product of Z and P. + Array whose indexes range within [0..ZRows-1,0..ZColumns-1]. + If ZRows=0 or ZColumns=0, the array is not modified. -- ALGLIB -- - Copyright 23.07.2012 by Bochkanov Sergey + 2005-2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mlpstarttraining( - mlptrainer s, - multilayerperceptron network, - bool randomstart); +
    void alglib::rmatrixbdmultiplybyp( + real_2d_array qp, + ae_int_t m, + ae_int_t n, + real_1d_array taup, + real_2d_array& z, + ae_int_t zrows, + ae_int_t zcolumns, + bool fromtheright, + bool dotranspose, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function trains neural network ensemble passed to this function using -current dataset and early stopping training algorithm. Each early stopping -round performs NRestarts random restarts (thus, EnsembleSize*NRestarts -training rounds is performed in total). +Multiplication by matrix Q which reduces matrix A to bidiagonal form. -FOR USERS OF COMMERCIAL EDITION: +The algorithm allows pre- or post-multiply by Q or Q'. - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support (C++ computational core) - ! - ! Second improvement gives constant speedup (2-3X). First improvement - ! gives close-to-linear speedup on multicore systems. Following - ! operations can be executed in parallel: - ! * EnsembleSize training sessions performed for each of ensemble - ! members (always parallelized) - ! * NRestarts training sessions performed within each of training - ! sessions (if NRestarts>1) - ! * gradient calculation over large dataset (if dataset is large enough) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - -INPUT PARAMETERS: - S - trainer object; - Ensemble - neural network ensemble. It must have same number of - inputs and outputs/classes as was specified during - creation of the trainer object. - NRestarts - number of restarts, >=0: - * NRestarts>0 means that specified number of random - restarts are performed during each ES round; - * NRestarts=0 is silently replaced by 1. - -OUTPUT PARAMETERS: - Ensemble - trained ensemble; - Rep - it contains all type of errors. - -NOTE: this training method uses BOTH early stopping and weight decay! So, - you should select weight decay before starting training just as you - select it before training "conventional" networks. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(), - or single-point dataset was passed, ensemble is filled by zero - values. +Input parameters: + QP - matrices Q and P in compact form. + Output of ToBidiagonal subroutine. + M - number of rows in matrix A. + N - number of columns in matrix A. + TAUQ - scalar factors which are used to form Q. + Output of ToBidiagonal subroutine. + Z - multiplied matrix. 
+ array[0..ZRows-1,0..ZColumns-1] + ZRows - number of rows in matrix Z. If FromTheRight=False, + ZRows=M, otherwise ZRows can be arbitrary. + ZColumns - number of columns in matrix Z. If FromTheRight=True, + ZColumns=M, otherwise ZColumns can be arbitrary. + FromTheRight - pre- or post-multiply. + DoTranspose - multiply by Q or Q'. -NOTE: this method uses sum-of-squares error function for training. +Output parameters: + Z - product of Z and Q. + Array[0..ZRows-1,0..ZColumns-1] + If ZRows=0 or ZColumns=0, the array is not modified. -- ALGLIB -- - Copyright 22.08.2012 by Bochkanov Sergey + 2005-2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mlptrainensemblees( - mlptrainer s, - mlpensemble ensemble, - ae_int_t nrestarts, - mlpreport& rep); -void alglib::smp_mlptrainensemblees( - mlptrainer s, - mlpensemble ensemble, - ae_int_t nrestarts, - mlpreport& rep); +
    void alglib::rmatrixbdmultiplybyq( + real_2d_array qp, + ae_int_t m, + ae_int_t n, + real_1d_array tauq, + real_2d_array& z, + ae_int_t zrows, + ae_int_t zcolumns, + bool fromtheright, + bool dotranspose, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - +
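A minimal sketch of the multiply-by-Q routine above (linalg.h and the 4x3 input values are assumptions). Pre-multiplying an identity matrix by Q turns the compact RMatrixBD output into an explicit Q, which also makes it easy to cross-check RMatrixBDUnpackQ documented further below.

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Reduce a 4x3 matrix to bidiagonal form, then apply Q to an identity
    // matrix with rmatrixbdmultiplybyq(). With FromTheRight=false and
    // DoTranspose=false the call computes Z := Q*Z, so starting from Z=I
    // we obtain Q itself.
    //
    real_2d_array a = "[[1,2,3],[4,5,6],[7,8,10],[2,0,1]]";   // illustrative input
    real_1d_array tauq;
    real_1d_array taup;
    ae_int_t m = 4;
    ae_int_t n = 3;

    rmatrixbd(a, m, n, tauq, taup);   // A now holds B plus packed reflectors

    real_2d_array z = "[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]";
    rmatrixbdmultiplybyq(a, m, n, tauq, z, m, m, false, false);   // Z := Q*Z

    printf("%s\n", z.tostring(3).c_str());
    return 0;
}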
     
    /************************************************************************* -Neural network training using early stopping (base algorithm - L-BFGS with -regularization). - -INPUT PARAMETERS: - Network - neural network with initialized geometry - TrnXY - training set - TrnSize - training set size, TrnSize>0 - ValXY - validation set - ValSize - validation set size, ValSize>0 - Decay - weight decay constant, >=0.001 - Decay term 'Decay*||Weights||^2' is added to error - function. - If you don't know what Decay to choose, use 0.001. - Restarts - number of restarts, either: - * strictly positive number - algorithm make specified - number of restarts from random position. - * -1, in which case algorithm makes exactly one run - from the initial state of the network (no randomization). - If you don't know what Restarts to choose, choose one - one the following: - * -1 (deterministic start) - * +1 (one random restart) - * +5 (moderate amount of random restarts) - -OUTPUT PARAMETERS: - Network - trained neural network. - Info - return code: - * -2, if there is a point with class number - outside of [0..NOut-1]. - * -1, if wrong parameters specified - (NPoints<0, Restarts<1, ...). - * 2, task has been solved, stopping criterion met - - sufficiently small step size. Not expected (we - use EARLY stopping) but possible and not an - error. - * 6, task has been solved, stopping criterion met - - increasing of validation set error. - Rep - training report +Unpacking of the main and secondary diagonals of bidiagonal decomposition +of matrix A. -NOTE: +Input parameters: + B - output of RMatrixBD subroutine. + M - number of rows in matrix B. + N - number of columns in matrix B. -Algorithm stops if validation set error increases for a long enough or -step size is small enought (there are task where validation set may -decrease for eternity). In any case solution returned corresponds to the -minimum of validation set error. +Output parameters: + IsUpper - True, if the matrix is upper bidiagonal. + otherwise IsUpper is False. + D - the main diagonal. + Array whose index ranges within [0..Min(M,N)-1]. + E - the secondary diagonal (upper or lower, depending on + the value of IsUpper). + Array index ranges within [0..Min(M,N)-1], the last + element is not used. -- ALGLIB -- - Copyright 10.03.2009 by Bochkanov Sergey + 2005-2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mlptraines( - multilayerperceptron network, - real_2d_array trnxy, - ae_int_t trnsize, - real_2d_array valxy, - ae_int_t valsize, - double decay, - ae_int_t restarts, - ae_int_t& info, - mlpreport& rep); +
    void alglib::rmatrixbdunpackdiagonals( + real_2d_array b, + ae_int_t m, + ae_int_t n, + bool& isupper, + real_1d_array& d, + real_1d_array& e, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Neural network training using L-BFGS algorithm with regularization. -Subroutine trains neural network with restarts from random positions. -Algorithm is well suited for problems of any dimensionality (memory -requirements and step complexity are linear by weights number). +Unpacking matrix P which reduces matrix A to bidiagonal form. +The subroutine returns transposed matrix P. -INPUT PARAMETERS: - Network - neural network with initialized geometry - XY - training set - NPoints - training set size - Decay - weight decay constant, >=0.001 - Decay term 'Decay*||Weights||^2' is added to error - function. - If you don't know what Decay to choose, use 0.001. - Restarts - number of restarts from random position, >0. - If you don't know what Restarts to choose, use 2. - WStep - stopping criterion. Algorithm stops if step size is - less than WStep. Recommended value - 0.01. Zero step - size means stopping after MaxIts iterations. - MaxIts - stopping criterion. Algorithm stops after MaxIts - iterations (NOT gradient calculations). Zero MaxIts - means stopping when step is sufficiently small. +Input parameters: + QP - matrices Q and P in compact form. + Output of ToBidiagonal subroutine. + M - number of rows in matrix A. + N - number of columns in matrix A. + TAUP - scalar factors which are used to form P. + Output of ToBidiagonal subroutine. + PTRows - required number of rows of matrix P^T. N >= PTRows >= 0. -OUTPUT PARAMETERS: - Network - trained neural network. - Info - return code: - * -8, if both WStep=0 and MaxIts=0 - * -2, if there is a point with class number - outside of [0..NOut-1]. - * -1, if wrong parameters specified - (NPoints<0, Restarts<1). - * 2, if task has been solved. - Rep - training report +Output parameters: + PT - first PTRows columns of matrix P^T + Array[0..PTRows-1, 0..N-1] + If PTRows=0, the array is not modified. -- ALGLIB -- - Copyright 09.12.2007 by Bochkanov Sergey + 2005-2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mlptrainlbfgs( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints, - double decay, - ae_int_t restarts, - double wstep, - ae_int_t maxits, - ae_int_t& info, - mlpreport& rep); +
    void alglib::rmatrixbdunpackpt( + real_2d_array qp, + ae_int_t m, + ae_int_t n, + real_1d_array taup, + ae_int_t ptrows, + real_2d_array& pt, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Neural network training using modified Levenberg-Marquardt with exact -Hessian calculation and regularization. Subroutine trains neural network -with restarts from random positions. Algorithm is well suited for small -and medium scale problems (hundreds of weights). +Unpacking matrix Q which reduces a matrix to bidiagonal form. -INPUT PARAMETERS: - Network - neural network with initialized geometry - XY - training set - NPoints - training set size - Decay - weight decay constant, >=0.001 - Decay term 'Decay*||Weights||^2' is added to error - function. - If you don't know what Decay to choose, use 0.001. - Restarts - number of restarts from random position, >0. - If you don't know what Restarts to choose, use 2. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -OUTPUT PARAMETERS: - Network - trained neural network. - Info - return code: - * -9, if internal matrix inverse subroutine failed - * -2, if there is a point with class number - outside of [0..NOut-1]. - * -1, if wrong parameters specified - (NPoints<0, Restarts<1). - * 2, if task has been solved. - Rep - training report +Input parameters: + QP - matrices Q and P in compact form. + Output of ToBidiagonal subroutine. + M - number of rows in matrix A. + N - number of columns in matrix A. + TAUQ - scalar factors which are used to form Q. + Output of ToBidiagonal subroutine. + QColumns - required number of columns in matrix Q. + M>=QColumns>=0. + +Output parameters: + Q - first QColumns columns of matrix Q. + Array[0..M-1, 0..QColumns-1] + If QColumns=0, the array is not modified. -- ALGLIB -- - Copyright 10.03.2009 by Bochkanov Sergey + 2005-2010 + Bochkanov Sergey *************************************************************************/ -
    void alglib::mlptrainlm( - multilayerperceptron network, - real_2d_array xy, - ae_int_t npoints, - double decay, - ae_int_t restarts, - ae_int_t& info, - mlpreport& rep); +
    void alglib::rmatrixbdunpackq( + real_2d_array qp, + ae_int_t m, + ae_int_t n, + real_1d_array tauq, + ae_int_t qcolumns, + real_2d_array& q, + const xparams _params = alglib::xdefault);
    - +
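Putting the bidiagonal routines together, a minimal sketch (again assuming linalg.h and an illustrative 4x3 input) that reduces A and then unpacks the diagonals of B, Q and P^T, so that A = Q*B*P^T can be verified if desired:

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // A = Q*B*P^T: reduce A with rmatrixbd(), then unpack the pieces.
    //
    real_2d_array a = "[[1,2,3],[4,5,6],[7,8,10],[2,0,1]]";
    real_1d_array tauq;
    real_1d_array taup;
    ae_int_t m = 4;
    ae_int_t n = 3;

    rmatrixbd(a, m, n, tauq, taup);

    bool isupper;
    real_1d_array d;   // main diagonal of B
    real_1d_array e;   // secondary diagonal of B
    rmatrixbdunpackdiagonals(a, m, n, isupper, d, e);

    real_2d_array q;   // first M columns of Q
    real_2d_array pt;  // first N rows of P^T
    rmatrixbdunpackq(a, m, n, tauq, m, q);
    rmatrixbdunpackpt(a, m, n, taup, n, pt);

    printf("isupper: %s\n", isupper ? "true" : "false");
    printf("d:  %s\n", d.tostring(3).c_str());
    printf("e:  %s\n", e.tostring(3).c_str());
    printf("Q:  %s\n", q.tostring(3).c_str());
    printf("PT: %s\n", pt.tostring(3).c_str());
    return 0;
}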
     
    /************************************************************************* -This function trains neural network passed to this function, using current -dataset (one which was passed to MLPSetDataset() or MLPSetSparseDataset()) -and current training settings. Training from NRestarts random starting -positions is performed, best network is chosen. +Reduction of a square matrix to upper Hessenberg form: Q'*A*Q = H, +where Q is an orthogonal matrix, H - Hessenberg matrix. -Training is performed using current training algorithm. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -FOR USERS OF COMMERCIAL EDITION: +Input parameters: + A - matrix A with elements [0..N-1, 0..N-1] + N - size of matrix A. - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support (C++ computational core) - ! - ! Second improvement gives constant speedup (2-3X). First improvement - ! gives close-to-linear speedup on multicore systems. Following - ! operations can be executed in parallel: - ! * NRestarts training sessions performed within each of - ! cross-validation rounds (if NRestarts>1) - ! * gradient calculation over large dataset (if dataset is large enough) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +Output parameters: + A - matrices Q and P in compact form (see below). + Tau - array of scalar factors which are used to form matrix Q. + Array whose index ranges within [0..N-2] -INPUT PARAMETERS: - S - trainer object - Network - neural network. It must have same number of inputs and - output/classes as was specified during creation of the - trainer object. - NRestarts - number of restarts, >=0: - * NRestarts>0 means that specified number of random - restarts are performed, best network is chosen after - training - * NRestarts=0 means that current state of the network - is used for training. +Matrix H is located on the main diagonal, on the lower secondary diagonal +and above the main diagonal of matrix A. The elements which are used to +form matrix Q are situated in array Tau and below the lower secondary +diagonal of matrix A as follows: -OUTPUT PARAMETERS: - Network - trained network +Matrix Q is represented as a product of elementary reflections -NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(), - network is filled by zero values. 
Same behavior for functions - MLPStartTraining and MLPContinueTraining. +Q = H(0)*H(2)*...*H(n-2), -NOTE: this method uses sum-of-squares error function for training. +where each H(i) is given by - -- ALGLIB -- - Copyright 23.07.2012 by Bochkanov Sergey +H(i) = 1 - tau * v * (v^T) + +where tau is a scalar stored in Tau[I]; v - is a real vector, +so that v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) stored in A(i+2:n-1,i). + + -- LAPACK routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + October 31, 1992 *************************************************************************/ -
    void alglib::mlptrainnetwork( - mlptrainer s, - multilayerperceptron network, - ae_int_t nrestarts, - mlpreport& rep); -void alglib::smp_mlptrainnetwork( - mlptrainer s, - multilayerperceptron network, - ae_int_t nrestarts, - mlpreport& rep); +
    void alglib::rmatrixhessenberg( + real_2d_array& a, + ae_int_t n, + real_1d_array& tau, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  [6]  

    - +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    -
    -using namespace alglib;
    +
    /************************************************************************* +Unpacking matrix H (the result of matrix A reduction to upper Hessenberg form) +Input parameters: + A - output of RMatrixHessenberg subroutine. + N - size of matrix A. -int main(int argc, char **argv) -{ - // - // Suppose that we want to classify numbers as positive (class 0) and negative - // (class 1). We have training set which includes several strictly positive - // or negative numbers - and zero. - // - // The problem is that we are not sure how to classify zero, so from time to - // time we mark it as positive or negative (with equal probability). Other - // numbers are marked in pure deterministic setting. How will neural network - // cope with such classification task? - // - // NOTE: we use network with excessive amount of neurons, which guarantees - // almost exact reproduction of the training set. Generalization ability - // of such network is rather low, but we are not concerned with such - // questions in this basic demo. - // - mlptrainer trn; - multilayerperceptron network; - mlpreport rep; - real_1d_array x = "[0]"; - real_1d_array y = "[0,0]"; +Output parameters: + H - matrix H. Array whose indexes range within [0..N-1, 0..N-1]. - // - // Training set. One row corresponds to one record [A => class(A)]. - // - // Classes are denoted by numbers from 0 to 1, where 0 corresponds to positive - // numbers and 1 to negative numbers. - // - // [ +1 0] - // [ +2 0] - // [ -1 1] - // [ -2 1] - // [ 0 0] !! sometimes we classify 0 as positive, sometimes as negative - // [ 0 1] !! - // - real_2d_array xy = "[[+1,0],[+2,0],[-1,1],[-2,1],[0,0],[0,1]]"; + -- ALGLIB -- + 2005-2010 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::rmatrixhessenbergunpackh( + real_2d_array a, + ae_int_t n, + real_2d_array& h, + const xparams _params = alglib::xdefault); - // - // - // When we solve classification problems, everything is slightly different from - // the regression ones: - // - // 1. Network is created. Because we solve classification problem, we use - // mlpcreatec1() function instead of mlpcreate1(). This function creates - // classifier network with SOFTMAX-normalized outputs. This network returns - // vector of class membership probabilities which are normalized to be - // non-negative and sum to 1.0 - // - // 2. We use mlpcreatetrainercls() function instead of mlpcreatetrainer() to - // create trainer object. Trainer object process dataset and neural network - // slightly differently to account for specifics of the classification - // problems. - // - // 3. Dataset is attached to trainer object. Note that dataset format is slightly - // different from one used for regression. - // - mlpcreatetrainercls(1, 2, trn); - mlpcreatec1(1, 5, 2, network); - mlpsetdataset(trn, xy, 6); +
    + +
    +
    /************************************************************************* +Unpacking matrix Q which reduces matrix A to upper Hessenberg form - // - // Network is trained with 5 restarts from random positions - // - mlptrainnetwork(trn, network, 5, rep); + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - // - // Test our neural network on strictly positive and strictly negative numbers. - // - // IMPORTANT! Classifier network returns class membership probabilities instead - // of class indexes. Network returns two values (probabilities) instead of one - // (class index). - // - // Thus, for +1 we expect to get [P0,P1] = [1,0], where P0 is probability that - // number is positive (belongs to class 0), and P1 is probability that number - // is negative (belongs to class 1). - // - // For -1 we expect to get [P0,P1] = [0,1] - // - // Following properties are guaranteed by network architecture: - // * P0>=0, P1>=0 non-negativity - // * P0+P1=1 normalization - // - x = "[1]"; - mlpprocess(network, x, y); - printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [1.000,0.000] - x = "[-1]"; - mlpprocess(network, x, y); - printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [0.000,1.000] +Input parameters: + A - output of RMatrixHessenberg subroutine. + N - size of matrix A. + Tau - scalar factors which are used to form Q. + Output of RMatrixHessenberg subroutine. - // - // But what our network will return for 0, which is between classes 0 and 1? - // - // In our dataset it has two different marks assigned (class 0 AND class 1). - // So network will return something average between class 0 and class 1: - // 0 => [0.5, 0.5] - // - x = "[0]"; - mlpprocess(network, x, y); - printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [0.500,0.500] - return 0; -} +Output parameters: + Q - matrix Q. + Array whose indexes range within [0..N-1, 0..N-1]. + -- ALGLIB -- + 2005-2010 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::rmatrixhessenbergunpackq( + real_2d_array a, + ae_int_t n, + real_1d_array tau, + real_2d_array& q, + const xparams _params = alglib::xdefault); -
    + +
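A minimal sketch for the Hessenberg group above (linalg.h and the 4x4 input are assumptions): reduce A, then unpack H and Q from the compact representation.

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Q'*A*Q = H: reduce a square matrix to upper Hessenberg form, then
    // unpack H and Q from the compact representation.
    //
    real_2d_array a = "[[1,2,3,4],[2,1,0,1],[5,1,2,2],[0,3,1,1]]";
    ae_int_t n = 4;
    real_1d_array tau;

    rmatrixhessenberg(a, n, tau);

    real_2d_array h;
    real_2d_array q;
    rmatrixhessenbergunpackh(a, n, h);       // Hessenberg matrix H
    rmatrixhessenbergunpackq(a, n, tau, q);  // orthogonal matrix Q

    printf("H: %s\n", h.tostring(3).c_str());
    printf("Q: %s\n", q.tostring(3).c_str());
    return 0;
}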
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    -
    -using namespace alglib;
    -
    +
    /************************************************************************* +LQ decomposition of a rectangular matrix of size MxN -int main(int argc, char **argv) -{ - // - // Suppose that we want to classify numbers as positive (class 0) and negative - // (class 1). We also have one more class for zero (class 2). - // - // NOTE: we use network with excessive amount of neurons, which guarantees - // almost exact reproduction of the training set. Generalization ability - // of such network is rather low, but we are not concerned with such - // questions in this basic demo. - // - mlptrainer trn; - multilayerperceptron network; - mlpreport rep; - real_1d_array x = "[0]"; - real_1d_array y = "[0,0,0]"; + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - // - // Training set. One row corresponds to one record [A => class(A)]. - // - // Classes are denoted by numbers from 0 to 2, where 0 corresponds to positive - // numbers, 1 to negative numbers, 2 to zero - // - // [ +1 0] - // [ +2 0] - // [ -1 1] - // [ -2 1] - // [ 0 2] - // - real_2d_array xy = "[[+1,0],[+2,0],[-1,1],[-2,1],[0,2]]"; +Input parameters: + A - matrix A whose indexes range within [0..M-1, 0..N-1]. + M - number of rows in matrix A. + N - number of columns in matrix A. - // - // - // When we solve classification problems, everything is slightly different from - // the regression ones: - // - // 1. Network is created. Because we solve classification problem, we use - // mlpcreatec1() function instead of mlpcreate1(). This function creates - // classifier network with SOFTMAX-normalized outputs. This network returns - // vector of class membership probabilities which are normalized to be - // non-negative and sum to 1.0 - // - // 2. We use mlpcreatetrainercls() function instead of mlpcreatetrainer() to - // create trainer object. Trainer object process dataset and neural network - // slightly differently to account for specifics of the classification - // problems. - // - // 3. Dataset is attached to trainer object. Note that dataset format is slightly - // different from one used for regression. - // - mlpcreatetrainercls(1, 3, trn); - mlpcreatec1(1, 5, 3, network); - mlpsetdataset(trn, xy, 5); +Output parameters: + A - matrices L and Q in compact form (see below) + Tau - array of scalar factors which are used to form + matrix Q. Array whose index ranges within [0..Min(M,N)-1]. - // - // Network is trained with 5 restarts from random positions - // - mlptrainnetwork(trn, network, 5, rep); +Matrix A is represented as A = LQ, where Q is an orthogonal matrix of size +MxM, L - lower triangular (or lower trapezoid) matrix of size M x N. - // - // Test our neural network on strictly positive and strictly negative numbers. - // - // IMPORTANT! Classifier network returns class membership probabilities instead - // of class indexes. Network returns three values (probabilities) instead of one - // (class index). 
- // - // Thus, for +1 we expect to get [P0,P1,P2] = [1,0,0], - // for -1 we expect to get [P0,P1,P2] = [0,1,0], - // and for 0 we will get [P0,P1,P2] = [0,0,1]. - // - // Following properties are guaranteed by network architecture: - // * P0>=0, P1>=0, P2>=0 non-negativity - // * P0+P1+P2=1 normalization - // - x = "[1]"; - mlpprocess(network, x, y); - printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [1.000,0.000,0.000] - x = "[-1]"; - mlpprocess(network, x, y); - printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [0.000,1.000,0.000] - x = "[0]"; - mlpprocess(network, x, y); - printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [0.000,0.000,1.000] - return 0; -} +The elements of matrix L are located on and below the main diagonal of +matrix A. The elements which are located in Tau array and above the main +diagonal of matrix A are used to form matrix Q as follows: +Matrix Q is represented as a product of elementary reflections -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    +Q = H(k-1)*H(k-2)*...*H(1)*H(0),
     
    -using namespace alglib;
    +where k = min(m,n), and each H(i) is of the form
     
    +H(i) = 1 - tau * v * (v^T)
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example shows how to perform cross-validation with ALGLIB
    -    //
    -    mlptrainer trn;
    -    multilayerperceptron network;
    -    mlpreport rep;
    +where tau is a scalar stored in Tau[I]; v - real vector, so that v(0:i-1)=0,
    +v(i) = 1, v(i+1:n-1) stored in A(i,i+1:n-1).
     
    -    //
    -    // Training set: f(x)=1/(x^2+1)
    -    // One row corresponds to one record [x,f(x)]
    -    //
    -    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[-1.3,0.4],[-1,0.5],[-0.6,0.7],[-0.3,0.9],[0,1],[2.0,0.2],[1.6,0.3],[1.3,0.4],[1,0.5],[0.6,0.7],[0.3,0.9]]";
    +  -- ALGLIB routine --
    +     17.02.2010
    +     Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::rmatrixlq( + real_2d_array& a, + ae_int_t m, + ae_int_t n, + real_1d_array& tau, + const xparams _params = alglib::xdefault); - // - // Trainer object is created. - // Dataset is attached to trainer object. - // - // NOTE: it is not good idea to perform cross-validation on sample - // as small as ours (13 examples). It is done for demonstration - // purposes only. Generalization error estimates won't be - // precise enough for practical purposes. - // - mlpcreatetrainer(1, 1, trn); - mlpsetdataset(trn, xy, 13); +
    + +
    +
    /************************************************************************* +Unpacking of matrix L from the LQ decomposition of a matrix A - // - // The key property of the cross-validation is that it estimates - // generalization properties of neural ARCHITECTURE. It does NOT - // estimates generalization error of some specific network which - // is passed to the k-fold CV routine. - // - // In our example we create 1x4x1 neural network and pass it to - // CV routine without training it. Original state of the network - // is not used for cross-validation - each round is restarted from - // random initial state. Only geometry of network matters. - // - // We perform 5 restarts from different random positions for each - // of the 10 cross-validation rounds. - // - mlpcreate1(1, 4, 1, network); - mlpkfoldcv(trn, network, 5, 10, rep); +Input parameters: + A - matrices Q and L in compact form. + Output of RMatrixLQ subroutine. + M - number of rows in given matrix A. M>=0. + N - number of columns in given matrix A. N>=0. - // - // Cross-validation routine stores estimates of the generalization - // error to MLP report structure. You may examine its fields and - // see estimates of different errors (RMS, CE, Avg). - // - // Because cross-validation is non-deterministic, in our manual we - // can not say what values will be stored to rep after call to - // mlpkfoldcv(). Every CV round will return slightly different - // estimates. - // - return 0; -} +Output parameters: + L - matrix L, array[0..M-1, 0..N-1]. + -- ALGLIB routine -- + 17.02.2010 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::rmatrixlqunpackl( + real_2d_array a, + ae_int_t m, + ae_int_t n, + real_2d_array& l, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    +
    /************************************************************************* +Partial unpacking of matrix Q from the LQ decomposition of a matrix A -using namespace alglib; + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. +Input parameters: + A - matrices L and Q in compact form. + Output of RMatrixLQ subroutine. + M - number of rows in given matrix A. M>=0. + N - number of columns in given matrix A. N>=0. + Tau - scalar factors which are used to form Q. + Output of the RMatrixLQ subroutine. + QRows - required number of rows in matrix Q. N>=QRows>=0. -int main(int argc, char **argv) -{ - // - // This example shows how to train early stopping ensebles. - // - mlptrainer trn; - mlpensemble ensemble; - mlpreport rep; +Output parameters: + Q - first QRows rows of matrix Q. Array whose indexes range + within [0..QRows-1, 0..N-1]. If QRows=0, the array remains + unchanged. - // - // Training set: f(x)=1/(x^2+1) - // One row corresponds to one record [x,f(x)] - // - real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[-1.3,0.4],[-1,0.5],[-0.6,0.7],[-0.3,0.9],[0,1],[2.0,0.2],[1.6,0.3],[1.3,0.4],[1,0.5],[0.6,0.7],[0.3,0.9]]"; + -- ALGLIB routine -- + 17.02.2010 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::rmatrixlqunpackq( + real_2d_array a, + ae_int_t m, + ae_int_t n, + real_1d_array tau, + ae_int_t qrows, + real_2d_array& q, + const xparams _params = alglib::xdefault); - // - // Trainer object is created. - // Dataset is attached to trainer object. - // - // NOTE: it is not good idea to use early stopping ensemble on sample - // as small as ours (13 examples). It is done for demonstration - // purposes only. Ensemble training algorithm won't find good - // solution on such small sample. - // - mlpcreatetrainer(1, 1, trn); - mlpsetdataset(trn, xy, 13); +
    + +
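A minimal sketch for the LQ group above (linalg.h and the 2x4 input are assumptions): decompose A = L*Q, then unpack L and the first QRows rows of Q.

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // A = L*Q: LQ-decompose a 2x4 matrix, then unpack L and Q from the
    // compact representation returned by rmatrixlq().
    //
    real_2d_array a = "[[1,2,3,4],[2,0,1,5]]";
    ae_int_t m = 2;
    ae_int_t n = 4;
    real_1d_array tau;

    rmatrixlq(a, m, n, tau);

    real_2d_array l;
    real_2d_array q;
    rmatrixlqunpackl(a, m, n, l);          // L, array[0..M-1, 0..N-1]
    rmatrixlqunpackq(a, m, n, tau, n, q);  // all N rows of Q (N>=QRows>=0)

    printf("L: %s\n", l.tostring(3).c_str());
    printf("Q: %s\n", q.tostring(3).c_str());
    return 0;
}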
    +
    /************************************************************************* +QR decomposition of a rectangular matrix of size MxN - // - // Ensemble is created and trained. Each of 50 network is trained - // with 5 restarts. - // - mlpecreate1(1, 4, 1, 50, ensemble); - mlptrainensemblees(trn, ensemble, 5, rep); - return 0; -} + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. +Input parameters: + A - matrix A whose indexes range within [0..M-1, 0..N-1]. + M - number of rows in matrix A. + N - number of columns in matrix A. -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    +Output parameters:
    +    A   -   matrices Q and R in compact form (see below).
    +    Tau -   array of scalar factors which are used to form
+            matrix Q. Array whose index ranges within [0..Min(M,N)-1].
     
    -using namespace alglib;
    +Matrix A is represented as A = QR, where Q is an orthogonal matrix of size
    +MxM, R - upper triangular (or upper trapezoid) matrix of size M x N.
     
    +The elements of matrix R are located on and above the main diagonal of
    +matrix A. The elements which are located in Tau array and below the main
    +diagonal of matrix A are used to form matrix Q as follows:
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example shows how to use parallel functionality of ALGLIB.
    -    // We generate simple 1-dimensional regression problem and show how
    -    // to use parallel training, parallel cross-validation, parallel
    -    // training of neural ensembles.
    -    //
    -    // We assume that you already know how to use ALGLIB in serial mode
    -    // and concentrate on its parallel capabilities.
    -    //
    -    // NOTE: it is not good idea to use parallel features on sample as small
    -    //       as ours (13 examples). It is done only for demonstration purposes.
    -    //
    -    mlptrainer trn;
    -    multilayerperceptron network;
    -    mlpensemble ensemble;
    -    mlpreport rep;
    -    real_2d_array xy = "[[-2.0,0.2],[-1.6,0.3],[-1.3,0.4],[-1,0.5],[-0.6,0.7],[-0.3,0.9],[0,1],[2.0,0.2],[1.6,0.3],[1.3,0.4],[1,0.5],[0.6,0.7],[0.3,0.9]]";
    -    mlpcreatetrainer(1, 1, trn);
    -    mlpsetdataset(trn, xy, 13);
    -    mlpcreate1(1, 4, 1, network);
    -    mlpecreate1(1, 4, 1, 50, ensemble);
    +Matrix Q is represented as a product of elementary reflections
     
    -    //
    -    // Below we demonstrate how to perform:
    -    // * parallel training of individual networks
    -    // * parallel cross-validation
    -    // * parallel training of neural ensembles
    -    //
    -    // In order to use multithreading, you have to:
    -    // 1) Install SMP edition of ALGLIB.
    -    // 2) This step is specific for C++ users: you should activate OS-specific
    -    //    capabilities of ALGLIB by defining AE_OS=AE_POSIX (for *nix systems)
    -    //    or AE_OS=AE_WINDOWS (for Windows systems).
    -    //    C# users do not have to perform this step because C# programs are
    -    //    portable across different systems without OS-specific tuning.
    -    // 3) Allow ALGLIB to know about number of worker threads to use:
    -    //    a) autodetection (C++, C#):
    -    //          ALGLIB will automatically determine number of CPU cores and
    -    //          (by default) will use all cores except for one. Say, on 4-core
    -    //          system it will use three cores - unless you manually told it
    -    //          to use more or less. It will keep your system responsive during
    -    //          lengthy computations.
    -    //          Such behavior may be changed with setnworkers() call:
    -    //          * alglib::setnworkers(0)  = use all cores
    -    //          * alglib::setnworkers(-1) = leave one core unused
    -    //          * alglib::setnworkers(-2) = leave two cores unused
    -    //          * alglib::setnworkers(+2) = use 2 cores (even if you have more)
    -    //    b) manual specification (C++, C#):
    -    //          You may want to specify maximum number of worker threads during
    -    //          compile time by means of preprocessor definition AE_NWORKERS.
    -    //          For C++ it will be "AE_NWORKERS=X" where X can be any positive number.
    -    //          For C# it is "AE_NWORKERSX", where X should be replaced by number of
    -    //          workers (AE_NWORKERS2, AE_NWORKERS3, AE_NWORKERS4, ...).
    -    //          You can add this definition to compiler command line or change
    -    //          corresponding project settings in your IDE.
    -    //
    -    // After you installed and configured SMP edition of ALGLIB, you may choose
    -    // between serial and multithreaded versions of SMP-capable functions:
    -    // * serial version works as usual, in the context of the calling thread
    -    // * multithreaded version (with "smp_" prefix) creates (or wakes up) worker
    -    //   threads, inserts task in the worker queue, and waits for completion of
    -    //   the task. All processing is done in context of worker thread(s).
    -    //
    -    // NOTE: because starting/stopping worker threads costs thousands of CPU cycles,
    -    //       you should not use multithreading for lightweight computational problems.
    -    //
    -    // NOTE: some old POSIX-compatible operating systems do not support
    -    //       sysconf(_SC_NPROCESSORS_ONLN) system call which is required in order
    -    //       to automatically determine number of active cores. On these systems
    -    //       you should specify number of cores manually at compile time.
    -    //       Without it ALGLIB will run in single-threaded mode.
    -    //
+Q = H(0)*H(1)*...*H(k-1),
     
    -    //
    -    // First, we perform parallel training of individual network with 5
    -    // restarts from random positions. These 5 rounds of  training  are
    -    // executed in parallel manner,  with  best  network  chosen  after
    -    // training.
    -    //
    -    // ALGLIB can use additional way to speed up computations -  divide
    -    // dataset   into   smaller   subsets   and   process these subsets
    -    // simultaneously. It allows us  to  efficiently  parallelize  even
    -    // single training round. This operation is performed automatically
    -    // for large datasets, but our toy dataset is too small.
    -    //
    -    smp_mlptrainnetwork(trn, network, 5, rep);
    +where k = min(m,n), and each H(i) is in the form
     
    -    //
    -    // Then, we perform parallel 10-fold cross-validation, with 5 random
    -    // restarts per each CV round. I.e., 5*10=50  networks  are trained
    -    // in total. All these operations can be parallelized.
    -    //
    -    // NOTE: again, ALGLIB can parallelize  calculation   of   gradient
    -    //       over entire dataset - but our dataset is too small.
    -    //
    -    smp_mlpkfoldcv(trn, network, 5, 10, rep);
    +H(i) = 1 - tau * v * (v^T)
     
    -    //
    -    // Finally, we train early stopping ensemble of 50 neural networks,
    -    // each  of them is trained with 5 random restarts. I.e.,  5*50=250
    -    // networks aretrained in total.
    -    //
    -    smp_mlptrainensemblees(trn, ensemble, 5, rep);
    -    return 0;
    -}
    +where tau is a scalar stored in Tau[I]; v - real vector,
    +so that v(0:i-1) = 0, v(i) = 1, v(i+1:m-1) stored in A(i+1:m-1,i).
     
    +  -- ALGLIB routine --
    +     17.02.2010
    +     Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::rmatrixqr( + real_2d_array& a, + ae_int_t m, + ae_int_t n, + real_1d_array& tau, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    +
    /************************************************************************* +Partial unpacking of matrix Q from the QR decomposition of a matrix A -using namespace alglib; + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. +Input parameters: + A - matrices Q and R in compact form. + Output of RMatrixQR subroutine. + M - number of rows in given matrix A. M>=0. + N - number of columns in given matrix A. N>=0. + Tau - scalar factors which are used to form Q. + Output of the RMatrixQR subroutine. + QColumns - required number of columns of matrix Q. M>=QColumns>=0. -int main(int argc, char **argv) -{ - // - // The very simple example on neural network: network is trained to reproduce - // small 2x2 multiplication table. - // - // NOTE: we use network with excessive amount of neurons, which guarantees - // almost exact reproduction of the training set. Generalization ability - // of such network is rather low, but we are not concerned with such - // questions in this basic demo. - // - mlptrainer trn; - multilayerperceptron network; - mlpreport rep; +Output parameters: + Q - first QColumns columns of matrix Q. + Array whose indexes range within [0..M-1, 0..QColumns-1]. + If QColumns=0, the array remains unchanged. - // - // Training set: - // * one row corresponds to one record A*B=C in the multiplication table - // * first two columns store A and B, last column stores C - // - // [1 * 1 = 1] - // [1 * 2 = 2] - // [2 * 1 = 2] - // [2 * 2 = 4] - // - real_2d_array xy = "[[1,1,1],[1,2,2],[2,1,2],[2,2,4]]"; + -- ALGLIB routine -- + 17.02.2010 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::rmatrixqrunpackq( + real_2d_array a, + ae_int_t m, + ae_int_t n, + real_1d_array tau, + ae_int_t qcolumns, + real_2d_array& q, + const xparams _params = alglib::xdefault); - // - // Network is created. - // Trainer object is created. - // Dataset is attached to trainer object. - // - mlpcreatetrainer(2, 1, trn); - mlpcreate1(2, 5, 1, network); - mlpsetdataset(trn, xy, 4); +
    + +
    +
    /************************************************************************* +Unpacking of matrix R from the QR decomposition of a matrix A - // - // Network is trained with 5 restarts from random positions - // - mlptrainnetwork(trn, network, 5, rep); +Input parameters: + A - matrices Q and R in compact form. + Output of RMatrixQR subroutine. + M - number of rows in given matrix A. M>=0. + N - number of columns in given matrix A. N>=0. - // - // 2*2=? - // - real_1d_array x = "[2,2]"; - real_1d_array y = "[0]"; - mlpprocess(network, x, y); - printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [4.000] - return 0; -} +Output parameters: + R - matrix R, array[0..M-1, 0..N-1]. + -- ALGLIB routine -- + 17.02.2010 + Bochkanov Sergey +*************************************************************************/ +
    void alglib::rmatrixqrunpackr( + real_2d_array a, + ae_int_t m, + ae_int_t n, + real_2d_array& r, + const xparams _params = alglib::xdefault); -
    + +
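A minimal sketch for the QR group above (linalg.h and the 3x2 input are assumptions): decompose A = Q*R, then unpack Q and R from the compact form; tostring() is used for output as in the manual's other examples.

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // A = Q*R: QR-decompose a 3x2 matrix, then unpack Q and R from the
    // compact representation returned by rmatrixqr().
    //
    real_2d_array a = "[[1,2],[3,4],[5,6]]";
    ae_int_t m = 3;
    ae_int_t n = 2;
    real_1d_array tau;

    rmatrixqr(a, m, n, tau);

    real_2d_array q;
    real_2d_array r;
    rmatrixqrunpackq(a, m, n, tau, m, q);  // all M columns of Q (M>=QColumns>=0)
    rmatrixqrunpackr(a, m, n, r);          // R, array[0..M-1, 0..N-1]

    printf("Q: %s\n", q.tostring(3).c_str());
    printf("R: %s\n", r.tostring(3).c_str());
    return 0;
}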
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    +
    /************************************************************************* +Reduction of a symmetric matrix which is given by its higher or lower +triangular part to a tridiagonal matrix using orthogonal similarity +transformation: Q'*A*Q=T. -using namespace alglib; + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. +Input parameters: + A - matrix to be transformed + array with elements [0..N-1, 0..N-1]. + N - size of matrix A. + IsUpper - storage format. If IsUpper = True, then matrix A is given + by its upper triangle, and the lower triangle is not used + and not modified by the algorithm, and vice versa + if IsUpper = False. -int main(int argc, char **argv) -{ - // - // Network with 2 inputs and 2 outputs is trained to reproduce vector function: - // (x0,x1) => (x0+x1, x0*x1) - // - // Informally speaking, we want neural network to simultaneously calculate - // both sum of two numbers and their product. - // - // NOTE: we use network with excessive amount of neurons, which guarantees - // almost exact reproduction of the training set. Generalization ability - // of such network is rather low, but we are not concerned with such - // questions in this basic demo. - // - mlptrainer trn; - multilayerperceptron network; - mlpreport rep; +Output parameters: + A - matrices T and Q in compact form (see lower) + Tau - array of factors which are forming matrices H(i) + array with elements [0..N-2]. + D - main diagonal of symmetric matrix T. + array with elements [0..N-1]. + E - secondary diagonal of symmetric matrix T. + array with elements [0..N-2]. - // - // Training set. One row corresponds to one record [A,B,A+B,A*B]. - // - // [ 1 1 1+1 1*1 ] - // [ 1 2 1+2 1*2 ] - // [ 2 1 2+1 2*1 ] - // [ 2 2 2+2 2*2 ] - // - real_2d_array xy = "[[1,1,2,1],[1,2,3,2],[2,1,3,2],[2,2,4,4]]"; - // - // Network is created. - // Trainer object is created. - // Dataset is attached to trainer object. - // - mlpcreatetrainer(2, 2, trn); - mlpcreate1(2, 5, 2, network); - mlpsetdataset(trn, xy, 4); + If IsUpper=True, the matrix Q is represented as a product of elementary + reflectors - // - // Network is trained with 5 restarts from random positions - // - mlptrainnetwork(trn, network, 5, rep); + Q = H(n-2) . . . H(2) H(0). - // - // 2+1=? - // 2*1=? - // - real_1d_array x = "[2,1]"; - real_1d_array y = "[0,0]"; - mlpprocess(network, x, y); - printf("%s\n", y.tostring(1).c_str()); // EXPECTED: [3.000,2.000] - return 0; -} + Each H(i) has the form + H(i) = I - tau * v * v' -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "dataanalysis.h"
    +  where tau is a real scalar, and v is a real vector with
    +  v(i+1:n-1) = 0, v(i) = 1, v(0:i-1) is stored on exit in
    +  A(0:i-1,i+1), and tau in TAU(i).
     
    -using namespace alglib;
    +  If IsUpper=False, the matrix Q is represented as a product of elementary
    +  reflectors
     
+     Q = H(0) H(1) . . . H(n-2).
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // Trainer object is used to train network. It stores dataset, training settings,
    -    // and other information which is NOT part of neural network. You should use
    -    // trainer object as follows:
    -    // (1) you create trainer object and specify task type (classification/regression)
    -    //     and number of inputs/outputs
    -    // (2) you add dataset to the trainer object
    -    // (3) you may change training settings (stopping criteria or weight decay)
    -    // (4) finally, you may train one or more networks
    -    //
    -    // You may interleave stages 2...4 and repeat them many times. Trainer object
    -    // remembers its internal state and can be used several times after its creation
    -    // and initialization.
    -    //
    -    mlptrainer trn;
    +  Each H(i) has the form
     
    -    //
    -    // Stage 1: object creation.
    -    //
    -    // We have to specify number of inputs and outputs. Trainer object can be used
    -    // only for problems with same number of inputs/outputs as was specified during
    -    // its creation.
    -    //
    -    // In case you want to train SOFTMAX-normalized network which solves classification
    -    // problems,  you  must  use  another  function  to  create  trainer  object:
    -    // mlpcreatetrainercls().
    -    //
    -    // Below we create trainer object which can be used to train regression networks
    -    // with 2 inputs and 1 output.
    -    //
    -    mlpcreatetrainer(2, 1, trn);
    +     H(i) = I - tau * v * v'
     
    -    //
    -    // Stage 2: specification of the training set
    -    //
    -    // By default trainer object stores empty dataset. So to solve your non-empty problem
    -    // you have to set dataset by passing to trainer dense or sparse matrix.
    -    //
    -    // One row of the matrix corresponds to one record A*B=C in the multiplication table.
    -    // First two columns store A and B, last column stores C
    -    //
    -    //     [1 * 1 = 1]   [ 1 1 1 ]
    -    //     [1 * 2 = 2]   [ 1 2 2 ]
    -    //     [2 * 1 = 2] = [ 2 1 2 ]
    -    //     [2 * 2 = 4]   [ 2 2 4 ]
    -    //
    -    real_2d_array xy = "[[1,1,1],[1,2,2],[2,1,2],[2,2,4]]";
    -    mlpsetdataset(trn, xy, 4);
    +  where tau is a real scalar, and v is a real vector with
    +  v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) is stored on exit in A(i+2:n-1,i),
    +  and tau in TAU(i).
     
    -    //
    -    // Stage 3: modification of the training parameters.
    -    //
    -    // You may modify parameters like weights decay or stopping criteria:
    -    // * we set moderate weight decay
    -    // * we choose iterations limit as stopping condition (another condition - step size -
    -    //   is zero, which means than this condition is not active)
    -    //
    -    double wstep = 0.000;
    -    ae_int_t maxits = 100;
    -    mlpsetdecay(trn, 0.01);
    -    mlpsetcond(trn, wstep, maxits);
    +  The contents of A on exit are illustrated by the following examples
    +  with n = 5:
     
    -    //
    -    // Stage 4: training.
    -    //
    -    // We will train several networks with different architecture using same trainer object.
    -    // We may change training parameters or even dataset, so different networks are trained
    -    // differently. But in this simple example we will train all networks with same settings.
    -    //
    -    // We create and train three networks:
    -    // * network 1 has 2x1 architecture     (2 inputs, no hidden neurons, 1 output)
    -    // * network 2 has 2x5x1 architecture   (2 inputs, 5 hidden neurons, 1 output)
    -    // * network 3 has 2x5x5x1 architecture (2 inputs, two hidden layers, 1 output)
    -    //
    -    // NOTE: these networks solve regression problems. For classification problems you
    -    //       should use mlpcreatec0/c1/c2 to create neural networks which have SOFTMAX-
    -    //       normalized outputs.
    -    //
    -    multilayerperceptron net1;
    -    multilayerperceptron net2;
    -    multilayerperceptron net3;
    -    mlpreport rep;
    +  if UPLO = 'U':                       if UPLO = 'L':
     
    -    mlpcreate0(2, 1, net1);
    -    mlpcreate1(2, 5, 1, net2);
    -    mlpcreate2(2, 5, 5, 1, net3);
    +    (  d   e   v1  v2  v3 )              (  d                  )
    +    (      d   e   v2  v3 )              (  e   d              )
    +    (          d   e   v3 )              (  v0  e   d          )
    +    (              d   e  )              (  v0  v1  e   d      )
    +    (                  d  )              (  v0  v1  v2  e   d  )
     
    -    mlptrainnetwork(trn, net1, 5, rep);
    -    mlptrainnetwork(trn, net2, 5, rep);
    -    mlptrainnetwork(trn, net3, 5, rep);
    -    return 0;
    -}
    +  where d and e denote diagonal and off-diagonal elements of T, and vi
    +  denotes an element of the vector defining H(i).
     
    +  -- LAPACK routine (version 3.0) --
    +     Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
    +     Courant Institute, Argonne National Lab, and Rice University
    +     October 31, 1992
    +*************************************************************************/
    +
    void alglib::smatrixtd( + real_2d_array& a, + ae_int_t n, + bool isupper, + real_1d_array& tau, + real_1d_array& d, + real_1d_array& e, + const xparams _params = alglib::xdefault); -
    + + +
    +
    /************************************************************************* +Unpacking matrix Q which reduces symmetric matrix to a tridiagonal +form. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +Input parameters: + A - the result of a SMatrixTD subroutine + N - size of matrix A. + IsUpper - storage format (a parameter of SMatrixTD subroutine) + Tau - the result of a SMatrixTD subroutine + +Output parameters: + Q - transformation matrix. + array with elements [0..N-1, 0..N-1]. + + -- ALGLIB -- + Copyright 2005-2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::smatrixtdunpackq( + real_2d_array a, + ae_int_t n, + bool isupper, + real_1d_array tau, + real_2d_array& q, + const xparams _params = alglib::xdefault); + +
    + - + +
    +
/*************************************************************************
+Parametric spline interpolant: 2-dimensional curve.
+
+You should not try to access its members directly - use PSpline2XXXXXXXX()
+functions instead.
+*************************************************************************/
+
    class pspline2interpolant +{ +}; + +
    +
     
/*************************************************************************
+Parametric spline interpolant: 3-dimensional curve.
+You should not try to access its members directly - use PSpline3XXXXXXXX()
+functions instead.
*************************************************************************/
-class kdtree
+class pspline3interpolant
{
};
    - +
     
    /************************************************************************* -KD-tree creation - -This subroutine creates KD-tree from set of X-values and optional Y-values +This subroutine fits piecewise linear curve to points with Ramer-Douglas- +Peucker algorithm. This function performs PARAMETRIC fit, i.e. it can be +used to fit curves like circles. -INPUT PARAMETERS - XY - dataset, array[0..N-1,0..NX+NY-1]. - one row corresponds to one point. - first NX columns contain X-values, next NY (NY may be zero) - columns may contain associated Y-values - N - number of points, N>=0. - NX - space dimension, NX>=1. - NY - number of optional Y-values, NY>=0. - NormType- norm type: - * 0 denotes infinity-norm - * 1 denotes 1-norm - * 2 denotes 2-norm (Euclidean norm) +On input it accepts dataset which describes parametric multidimensional +curve X(t), with X being vector, and t taking values in [0,N), where N is +a number of points in dataset. As result, it returns reduced dataset X2, +which can be used to build parametric curve X2(t), which approximates +X(t) with desired precision (or has specified number of sections). -OUTPUT PARAMETERS - KDT - KD-tree +INPUT PARAMETERS: + X - array of multidimensional points: + * at least N elements, leading N elements are used if more + than N elements were specified + * order of points is IMPORTANT because it is parametric + fit + * each row of array is one point which has D coordinates + N - number of elements in X + D - number of dimensions (elements per row of X) + StopM - stopping condition - desired number of sections: + * at most M sections are generated by this function + * less than M sections can be generated if we have N<M + (or some X are non-distinct). + * zero StopM means that algorithm does not stop after + achieving some pre-specified section count + StopEps - stopping condition - desired precision: + * algorithm stops after error in each section is at most Eps + * zero Eps means that algorithm does not stop after + achieving some pre-specified precision -NOTES +OUTPUT PARAMETERS: + X2 - array of corner points for piecewise approximation, + has length NSections+1 or zero (for NSections=0). + Idx2 - array of indexes (parameter values): + * has length NSections+1 or zero (for NSections=0). + * each element of Idx2 corresponds to same-numbered + element of X2 + * each element of Idx2 is index of corresponding element + of X2 at original array X, i.e. I-th row of X2 is + Idx2[I]-th row of X. + * elements of Idx2 can be treated as parameter values + which should be used when building new parametric curve + * Idx2[0]=0, Idx2[NSections]=N-1 + NSections- number of sections found by algorithm, NSections<=M, + NSections can be zero for degenerate datasets + (N<=1 or all X[] are non-distinct). -1. KD-tree creation have O(N*logN) complexity and O(N*(2*NX+NY)) memory - requirements. -2. Although KD-trees may be used with any combination of N and NX, they - are more efficient than brute-force search only when N >> 4^NX. So they - are most useful in low-dimensional tasks (NX=2, NX=3). NX=1 is another - inefficient case, because simple binary search (without additional - structures) is much more efficient in such tasks than KD-trees. +NOTE: algorithm stops after: + a) dividing curve into StopM sections + b) achieving required precision StopEps + c) dividing curve into N-1 sections + If both StopM and StopEps are non-zero, algorithm is stopped by the + FIRST criterion which is satisfied. 
In case both StopM and StopEps + are zero, algorithm stops because of (c). -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 02.10.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::kdtreebuild( - real_2d_array xy, - ae_int_t nx, - ae_int_t ny, - ae_int_t normtype, - kdtree& kdt); -void alglib::kdtreebuild( - real_2d_array xy, +
    void alglib::parametricrdpfixed( + real_2d_array x, ae_int_t n, - ae_int_t nx, - ae_int_t ny, - ae_int_t normtype, - kdtree& kdt); + ae_int_t d, + ae_int_t stopm, + double stopeps, + real_2d_array& x2, + integer_1d_array& idx2, + ae_int_t& nsections, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -KD-tree creation +This function calculates arc length, i.e. length of curve between t=a +and t=b. -This subroutine creates KD-tree from set of X-values, integer tags and -optional Y-values +INPUT PARAMETERS: + P - parametric spline interpolant + A,B - parameter values corresponding to arc ends: + * B>A will result in positive length returned + * B<A will result in negative length returned -INPUT PARAMETERS - XY - dataset, array[0..N-1,0..NX+NY-1]. - one row corresponds to one point. - first NX columns contain X-values, next NY (NY may be zero) - columns may contain associated Y-values - Tags - tags, array[0..N-1], contains integer tags associated - with points. - N - number of points, N>=0 - NX - space dimension, NX>=1. - NY - number of optional Y-values, NY>=0. - NormType- norm type: - * 0 denotes infinity-norm - * 1 denotes 1-norm - * 2 denotes 2-norm (Euclidean norm) +RESULT: + length of arc starting at T=A and ending at T=B. -OUTPUT PARAMETERS - KDT - KD-tree -NOTES + -- ALGLIB PROJECT -- + Copyright 30.05.2010 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::pspline2arclength( + pspline2interpolant p, + double a, + double b, + const xparams _params = alglib::xdefault); -1. KD-tree creation have O(N*logN) complexity and O(N*(2*NX+NY)) memory - requirements. -2. Although KD-trees may be used with any combination of N and NX, they - are more efficient than brute-force search only when N >> 4^NX. So they - are most useful in low-dimensional tasks (NX=2, NX=3). NX=1 is another - inefficient case, because simple binary search (without additional - structures) is much more efficient in such tasks than KD-trees. +
    + +
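A short usage sketch (assuming the "interpolation.h" header and a spline built with pspline2build(), documented below; the sample points are illustrative): the length of the whole curve is simply the arc length between parameter values 0 and 1.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // open curve through four points, Catmull-Rom spline (ST=1),
    // chord length parameterization (PT=1)
    real_2d_array xy = "[[0,0],[1,0],[1,1],[0,1]]";
    pspline2interpolant p;
    pspline2build(xy, 4, 1, 1, p);

    // arc length of the whole curve (t from 0 to 1)
    double len = pspline2arclength(p, 0.0, 1.0);
    printf("%.3f\n", len);
    return 0;
}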
    +
    /************************************************************************* +This function builds non-periodic 2-dimensional parametric spline which +starts at (X[0],Y[0]) and ends at (X[N-1],Y[N-1]). - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey +INPUT PARAMETERS: + XY - points, array[0..N-1,0..1]. + XY[I,0:1] corresponds to the Ith point. + Order of points is important! + N - points count, N>=5 for Akima splines, N>=2 for other types of + splines. + ST - spline type: + * 0 Akima spline + * 1 parabolically terminated Catmull-Rom spline (Tension=0) + * 2 parabolically terminated cubic spline + PT - parameterization type: + * 0 uniform + * 1 chord length + * 2 centripetal + +OUTPUT PARAMETERS: + P - parametric spline interpolant + + +NOTES: +* this function assumes that there all consequent points are distinct. + I.e. (x0,y0)<>(x1,y1), (x1,y1)<>(x2,y2), (x2,y2)<>(x3,y3) and so on. + However, non-consequent points may coincide, i.e. we can have (x0,y0)= + =(x2,y2). + + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::kdtreebuildtagged( - real_2d_array xy, - integer_1d_array tags, - ae_int_t nx, - ae_int_t ny, - ae_int_t normtype, - kdtree& kdt); -void alglib::kdtreebuildtagged( +
    void alglib::pspline2build( real_2d_array xy, - integer_1d_array tags, ae_int_t n, - ae_int_t nx, - ae_int_t ny, - ae_int_t normtype, - kdtree& kdt); + ae_int_t st, + ae_int_t pt, + pspline2interpolant& p, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  
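A minimal build-and-evaluate sketch (assuming "interpolation.h"; the point set and the ST/PT choices are illustrative):

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // non-periodic curve through four points: cubic spline (ST=2),
    // chord length parameterization (PT=1)
    real_2d_array xy = "[[0,0],[1,0],[2,1],[3,3]]";
    pspline2interpolant p;
    pspline2build(xy, 4, 2, 1, p);

    // evaluate the curve in the middle of the parameter range
    double x, y;
    pspline2calc(p, 0.5, x, y);
    printf("%.3f %.3f\n", x, y);
    return 0;
}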

    - +
     
    /************************************************************************* -K-NN query: approximate K nearest neighbors +This function builds periodic 2-dimensional parametric spline which +starts at (X[0],Y[0]), goes through all points to (X[N-1],Y[N-1]) and then +back to (X[0],Y[0]). -INPUT PARAMETERS - KDT - KD-tree - X - point, array[0..NX-1]. - K - number of neighbors to return, K>=1 - SelfMatch - whether self-matches are allowed: - * if True, nearest neighbor may be the point itself - (if it exists in original dataset) - * if False, then only points with non-zero distance - are returned - * if not given, considered True - Eps - approximation factor, Eps>=0. eps-approximate nearest - neighbor is a neighbor whose distance from X is at - most (1+eps) times distance of true nearest neighbor. +INPUT PARAMETERS: + XY - points, array[0..N-1,0..1]. + XY[I,0:1] corresponds to the Ith point. + XY[N-1,0:1] must be different from XY[0,0:1]. + Order of points is important! + N - points count, N>=3 for other types of splines. + ST - spline type: + * 1 Catmull-Rom spline (Tension=0) with cyclic boundary conditions + * 2 cubic spline with cyclic boundary conditions + PT - parameterization type: + * 0 uniform + * 1 chord length + * 2 centripetal -RESULT - number of actual neighbors found (either K or N, if K>N). +OUTPUT PARAMETERS: + P - parametric spline interpolant -NOTES - significant performance gain may be achieved only when Eps is is on - the order of magnitude of 1 or larger. -This subroutine performs query and stores its result in the internal -structures of the KD-tree. You can use following subroutines to obtain -these results: -* KDTreeQueryResultsX() to get X-values -* KDTreeQueryResultsXY() to get X- and Y-values -* KDTreeQueryResultsTags() to get tag values -* KDTreeQueryResultsDistances() to get distances +NOTES: +* this function assumes that there all consequent points are distinct. + I.e. (x0,y0)<>(x1,y1), (x1,y1)<>(x2,y2), (x2,y2)<>(x3,y3) and so on. + However, non-consequent points may coincide, i.e. we can have (x0,y0)= + =(x2,y2). +* last point of sequence is NOT equal to the first point. You shouldn't + make curve "explicitly periodic" by making them equal. - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::kdtreequeryaknn( - kdtree kdt, - real_1d_array x, - ae_int_t k, - double eps); -ae_int_t alglib::kdtreequeryaknn( - kdtree kdt, - real_1d_array x, - ae_int_t k, - bool selfmatch, - double eps); +
    void alglib::pspline2buildperiodic( + real_2d_array xy, + ae_int_t n, + ae_int_t st, + ae_int_t pt, + pspline2interpolant& p, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  
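A small sketch of the periodic case (assuming "interpolation.h"; the data is illustrative). The square below is closed automatically, so its first corner is listed only once:

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // closed curve through the corners of a square; the first point is
    // NOT repeated at the end
    real_2d_array xy = "[[0,0],[1,0],[1,1],[0,1]]";
    pspline2interpolant p;
    pspline2buildperiodic(xy, 4, 2, 1, p);

    // on a periodic spline t=0 and t=1 map to the same point
    double x0, y0, x1, y1;
    pspline2calc(p, 0.0, x0, y0);
    pspline2calc(p, 1.0, x1, y1);
    printf("%.3f %.3f\n", x0, y0);
    printf("%.3f %.3f\n", x1, y1);
    return 0;
}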

    - +
     
    /************************************************************************* -K-NN query: K nearest neighbors +This function calculates the value of the parametric spline for a given +value of parameter T -INPUT PARAMETERS - KDT - KD-tree - X - point, array[0..NX-1]. - K - number of neighbors to return, K>=1 - SelfMatch - whether self-matches are allowed: - * if True, nearest neighbor may be the point itself - (if it exists in original dataset) - * if False, then only points with non-zero distance - are returned - * if not given, considered True +INPUT PARAMETERS: + P - parametric spline interpolant + T - point: + * T in [0,1] corresponds to interval spanned by points + * for non-periodic splines T<0 (or T>1) correspond to parts of + the curve before the first (after the last) point + * for periodic splines T<0 (or T>1) are projected into [0,1] + by making T=T-floor(T). -RESULT - number of actual neighbors found (either K or N, if K>N). +OUTPUT PARAMETERS: + X - X-position + Y - Y-position -This subroutine performs query and stores its result in the internal -structures of the KD-tree. You can use following subroutines to obtain -these results: -* KDTreeQueryResultsX() to get X-values -* KDTreeQueryResultsXY() to get X- and Y-values -* KDTreeQueryResultsTags() to get tag values -* KDTreeQueryResultsDistances() to get distances - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::kdtreequeryknn(kdtree kdt, real_1d_array x, ae_int_t k); -ae_int_t alglib::kdtreequeryknn( - kdtree kdt, - real_1d_array x, - ae_int_t k, - bool selfmatch); +
    void alglib::pspline2calc( + pspline2interpolant p, + double t, + double& x, + double& y, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Distances from last query +This function calculates derivative, i.e. it returns (dX/dT,dY/dT). -INPUT PARAMETERS - KDT - KD-tree - R - possibly pre-allocated buffer. If X is too small to store - result, it is resized. If size(X) is enough to store - result, it is left unchanged. +INPUT PARAMETERS: + P - parametric spline interpolant + T - point: + * T in [0,1] corresponds to interval spanned by points + * for non-periodic splines T<0 (or T>1) correspond to parts of + the curve before the first (after the last) point + * for periodic splines T<0 (or T>1) are projected into [0,1] + by making T=T-floor(T). -OUTPUT PARAMETERS - R - filled with distances (in corresponding norm) +OUTPUT PARAMETERS: + X - X-value + DX - X-derivative + Y - Y-value + DY - Y-derivative -NOTES -1. points are ordered by distance from the query point (first = closest) -2. if XY is larger than required to store result, only leading part will - be overwritten; trailing part will be left unchanged. So if on input - XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get - XY = [[1,2],[C,D]]. This is done purposely to increase performance; if - you want function to resize array according to result size, use - function with same name and suffix 'I'. -SEE ALSO -* KDTreeQueryResultsX() X-values -* KDTreeQueryResultsXY() X- and Y-values -* KDTreeQueryResultsTags() tag values + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::pspline2diff( + pspline2interpolant p, + double t, + double& x, + double& dx, + double& y, + double& dy, + const xparams _params = alglib::xdefault); - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey +
    + +
    +
    /************************************************************************* +This function calculates first and second derivative with respect to T. + +INPUT PARAMETERS: + P - parametric spline interpolant + T - point: + * T in [0,1] corresponds to interval spanned by points + * for non-periodic splines T<0 (or T>1) correspond to parts of + the curve before the first (after the last) point + * for periodic splines T<0 (or T>1) are projected into [0,1] + by making T=T-floor(T). + +OUTPUT PARAMETERS: + X - X-value + DX - derivative + D2X - second derivative + Y - Y-value + DY - derivative + D2Y - second derivative + + + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::kdtreequeryresultsdistances(kdtree kdt, real_1d_array& r); +
    void alglib::pspline2diff2( + pspline2interpolant p, + double t, + double& x, + double& dx, + double& d2x, + double& y, + double& dy, + double& d2y, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  
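A sketch relating pspline2diff() to pspline2tangent() (assuming "interpolation.h"; the data is illustrative): the tangent vector should coincide with the derivative (dX/dT,dY/dT) after normalization.

#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    real_2d_array xy = "[[0,0],[1,0],[2,1],[3,3]]";
    pspline2interpolant p;
    pspline2build(xy, 4, 2, 1, p);

    // first derivatives with respect to t at t=0.25
    double x, dx, y, dy;
    pspline2diff(p, 0.25, x, dx, y, dy);

    // normalized derivative vs. tangent returned by pspline2tangent()
    double norm = sqrt(dx*dx + dy*dy);
    double tx, ty;
    pspline2tangent(p, 0.25, tx, ty);
    printf("%.3f %.3f\n", dx/norm, dy/norm);
    printf("%.3f %.3f\n", tx, ty);
    return 0;
}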

    - +
     
/*************************************************************************
-Distances from last query; 'interactive' variant for languages like Python
-which support constructs like "R = KDTreeQueryResultsDistancesI(KDT)"
-and interactive mode of interpreter.
+This function returns vector of parameter values corresponding to points.
-This function allocates new array on each call, so it is significantly
-slower than its 'non-interactive' counterpart, but it is more convenient
-when you call it from command line.
+I.e. for P created from (X[0],Y[0])...(X[N-1],Y[N-1]) and U=TValues(P) we
+have
+    (X[0],Y[0]) = PSpline2Calc(P,U[0]),
+    (X[1],Y[1]) = PSpline2Calc(P,U[1]),
+    (X[2],Y[2]) = PSpline2Calc(P,U[2]),
+    ...
- -- ALGLIB --
- Copyright 28.02.2010 by Bochkanov Sergey
+INPUT PARAMETERS:
+    P   -   parametric spline interpolant
+
+OUTPUT PARAMETERS:
+    N   -   array size
+    T   -   array[0..N-1]
+
+
+NOTES:
+* for non-periodic splines U[0]=0, U[0]<U[1]<...<U[N-1], U[N-1]=1
+* for periodic splines U[0]=0, U[0]<U[1]<...<U[N-1], U[N-1]<1
+
+ -- ALGLIB PROJECT --
+ Copyright 28.05.2010 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::kdtreequeryresultsdistancesi(kdtree kdt, real_1d_array& r); +
    void alglib::pspline2parametervalues( + pspline2interpolant p, + ae_int_t& n, + real_1d_array& t, + const xparams _params = alglib::xdefault);
    - +
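A sketch of the round trip described above (assuming "interpolation.h"; the data is illustrative): evaluating the spline at the returned parameter values reproduces the original points.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    real_2d_array xy = "[[0,0],[1,0],[2,1],[3,3]]";
    pspline2interpolant p;
    pspline2build(xy, 4, 2, 1, p);

    // u[i] is the parameter value at which the curve passes through
    // the i-th input point
    ae_int_t n;
    real_1d_array u;
    pspline2parametervalues(p, n, u);

    double x, y;
    pspline2calc(p, u[2], x, y);
    printf("%s\n", u.tostring(3).c_str());
    printf("%.3f %.3f\n", x, y); // should be close to (2,1)
    return 0;
}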
     
    /************************************************************************* -Tags from last query +This function calculates tangent vector for a given value of parameter T -INPUT PARAMETERS - KDT - KD-tree - Tags - possibly pre-allocated buffer. If X is too small to store - result, it is resized. If size(X) is enough to store - result, it is left unchanged. +INPUT PARAMETERS: + P - parametric spline interpolant + T - point: + * T in [0,1] corresponds to interval spanned by points + * for non-periodic splines T<0 (or T>1) correspond to parts of + the curve before the first (after the last) point + * for periodic splines T<0 (or T>1) are projected into [0,1] + by making T=T-floor(T). -OUTPUT PARAMETERS - Tags - filled with tags associated with points, - or, when no tags were supplied, with zeros +OUTPUT PARAMETERS: + X - X-component of tangent vector (normalized) + Y - Y-component of tangent vector (normalized) -NOTES -1. points are ordered by distance from the query point (first = closest) -2. if XY is larger than required to store result, only leading part will - be overwritten; trailing part will be left unchanged. So if on input - XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get - XY = [[1,2],[C,D]]. This is done purposely to increase performance; if - you want function to resize array according to result size, use - function with same name and suffix 'I'. +NOTE: + X^2+Y^2 is either 1 (for non-zero tangent vector) or 0. -SEE ALSO -* KDTreeQueryResultsX() X-values -* KDTreeQueryResultsXY() X- and Y-values -* KDTreeQueryResultsDistances() distances - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::kdtreequeryresultstags(kdtree kdt, integer_1d_array& tags); +
    void alglib::pspline2tangent( + pspline2interpolant p, + double t, + double& x, + double& y, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Tags from last query; 'interactive' variant for languages like Python -which support constructs like "Tags = KDTreeQueryResultsTagsI(KDT)" and -interactive mode of interpreter. +This function calculates arc length, i.e. length of curve between t=a +and t=b. -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +INPUT PARAMETERS: + P - parametric spline interpolant + A,B - parameter values corresponding to arc ends: + * B>A will result in positive length returned + * B<A will result in negative length returned - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey +RESULT: + length of arc starting at T=A and ending at T=B. + + + -- ALGLIB PROJECT -- + Copyright 30.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::kdtreequeryresultstagsi(kdtree kdt, integer_1d_array& tags); +
    double alglib::pspline3arclength( + pspline3interpolant p, + double a, + double b, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -X-values from last query - -INPUT PARAMETERS - KDT - KD-tree - X - possibly pre-allocated buffer. If X is too small to store - result, it is resized. If size(X) is enough to store - result, it is left unchanged. - -OUTPUT PARAMETERS - X - rows are filled with X-values - -NOTES -1. points are ordered by distance from the query point (first = closest) -2. if XY is larger than required to store result, only leading part will - be overwritten; trailing part will be left unchanged. So if on input - XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get - XY = [[1,2],[C,D]]. This is done purposely to increase performance; if - you want function to resize array according to result size, use - function with same name and suffix 'I'. +This function builds non-periodic 3-dimensional parametric spline which +starts at (X[0],Y[0],Z[0]) and ends at (X[N-1],Y[N-1],Z[N-1]). -SEE ALSO -* KDTreeQueryResultsXY() X- and Y-values -* KDTreeQueryResultsTags() tag values -* KDTreeQueryResultsDistances() distances +Same as PSpline2Build() function, but for 3D, so we won't duplicate its +description here. - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::kdtreequeryresultsx(kdtree kdt, real_2d_array& x); +
    void alglib::pspline3build( + real_2d_array xy, + ae_int_t n, + ae_int_t st, + ae_int_t pt, + pspline3interpolant& p, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  
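A minimal 3D sketch, analogous to the 2D case (assuming "interpolation.h"; the data is illustrative):

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // non-periodic 3D curve through four points (x,y,z per row),
    // cubic spline (ST=2), chord length parameterization (PT=1)
    real_2d_array xyz = "[[0,0,0],[1,0,1],[2,1,1],[3,3,2]]";
    pspline3interpolant p;
    pspline3build(xyz, 4, 2, 1, p);

    double x, y, z;
    pspline3calc(p, 0.5, x, y, z);
    printf("%.3f %.3f %.3f\n", x, y, z);
    return 0;
}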

    - +
     
    /************************************************************************* -X-values from last query; 'interactive' variant for languages like Python -which support constructs like "X = KDTreeQueryResultsXI(KDT)" and -interactive mode of interpreter. +This function builds periodic 3-dimensional parametric spline which +starts at (X[0],Y[0],Z[0]), goes through all points to (X[N-1],Y[N-1],Z[N-1]) +and then back to (X[0],Y[0],Z[0]). -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +Same as PSpline2Build() function, but for 3D, so we won't duplicate its +description here. - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::kdtreequeryresultsxi(kdtree kdt, real_2d_array& x); +
    void alglib::pspline3buildperiodic( + real_2d_array xy, + ae_int_t n, + ae_int_t st, + ae_int_t pt, + pspline3interpolant& p, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -X- and Y-values from last query - -INPUT PARAMETERS - KDT - KD-tree - XY - possibly pre-allocated buffer. If XY is too small to store - result, it is resized. If size(XY) is enough to store - result, it is left unchanged. +This function calculates the value of the parametric spline for a given +value of parameter T. -OUTPUT PARAMETERS - XY - rows are filled with points: first NX columns with - X-values, next NY columns - with Y-values. +INPUT PARAMETERS: + P - parametric spline interpolant + T - point: + * T in [0,1] corresponds to interval spanned by points + * for non-periodic splines T<0 (or T>1) correspond to parts of + the curve before the first (after the last) point + * for periodic splines T<0 (or T>1) are projected into [0,1] + by making T=T-floor(T). -NOTES -1. points are ordered by distance from the query point (first = closest) -2. if XY is larger than required to store result, only leading part will - be overwritten; trailing part will be left unchanged. So if on input - XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get - XY = [[1,2],[C,D]]. This is done purposely to increase performance; if - you want function to resize array according to result size, use - function with same name and suffix 'I'. +OUTPUT PARAMETERS: + X - X-position + Y - Y-position + Z - Z-position -SEE ALSO -* KDTreeQueryResultsX() X-values -* KDTreeQueryResultsTags() tag values -* KDTreeQueryResultsDistances() distances - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::kdtreequeryresultsxy(kdtree kdt, real_2d_array& xy); +
    void alglib::pspline3calc( + pspline3interpolant p, + double t, + double& x, + double& y, + double& z, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -XY-values from last query; 'interactive' variant for languages like Python -which support constructs like "XY = KDTreeQueryResultsXYI(KDT)" and -interactive mode of interpreter. +This function calculates derivative, i.e. it returns (dX/dT,dY/dT,dZ/dT). -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +INPUT PARAMETERS: + P - parametric spline interpolant + T - point: + * T in [0,1] corresponds to interval spanned by points + * for non-periodic splines T<0 (or T>1) correspond to parts of + the curve before the first (after the last) point + * for periodic splines T<0 (or T>1) are projected into [0,1] + by making T=T-floor(T). - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey +OUTPUT PARAMETERS: + X - X-value + DX - X-derivative + Y - Y-value + DY - Y-derivative + Z - Z-value + DZ - Z-derivative + + + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::kdtreequeryresultsxyi(kdtree kdt, real_2d_array& xy); +
    void alglib::pspline3diff( + pspline3interpolant p, + double t, + double& x, + double& dx, + double& y, + double& dy, + double& z, + double& dz, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -R-NN query: all points within R-sphere centered at X +This function calculates first and second derivative with respect to T. -INPUT PARAMETERS - KDT - KD-tree - X - point, array[0..NX-1]. - R - radius of sphere (in corresponding norm), R>0 - SelfMatch - whether self-matches are allowed: - * if True, nearest neighbor may be the point itself - (if it exists in original dataset) - * if False, then only points with non-zero distance - are returned - * if not given, considered True +INPUT PARAMETERS: + P - parametric spline interpolant + T - point: + * T in [0,1] corresponds to interval spanned by points + * for non-periodic splines T<0 (or T>1) correspond to parts of + the curve before the first (after the last) point + * for periodic splines T<0 (or T>1) are projected into [0,1] + by making T=T-floor(T). -RESULT - number of neighbors found, >=0 +OUTPUT PARAMETERS: + X - X-value + DX - derivative + D2X - second derivative + Y - Y-value + DY - derivative + D2Y - second derivative + Z - Z-value + DZ - derivative + D2Z - second derivative -This subroutine performs query and stores its result in the internal -structures of the KD-tree. You can use following subroutines to obtain -actual results: -* KDTreeQueryResultsX() to get X-values -* KDTreeQueryResultsXY() to get X- and Y-values -* KDTreeQueryResultsTags() to get tag values -* KDTreeQueryResultsDistances() to get distances - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::kdtreequeryrnn(kdtree kdt, real_1d_array x, double r); -ae_int_t alglib::kdtreequeryrnn( - kdtree kdt, - real_1d_array x, - double r, - bool selfmatch); +
    void alglib::pspline3diff2( + pspline3interpolant p, + double t, + double& x, + double& dx, + double& d2x, + double& y, + double& dy, + double& d2y, + double& z, + double& dz, + double& d2z, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function serializes data structure to string. +This function returns vector of parameter values correspoding to points. -Important properties of s_out: -* it contains alphanumeric characters, dots, underscores, minus signs -* these symbols are grouped into words, which are separated by spaces - and Windows-style (CR+LF) newlines -* although serializer uses spaces and CR+LF as separators, you can - replace any separator character by arbitrary combination of spaces, - tabs, Windows or Unix newlines. It allows flexible reformatting of - the string in case you want to include it into text or XML file. - But you should not insert separators into the middle of the "words" - nor you should change case of letters. -* s_out can be freely moved between 32-bit and 64-bit systems, little - and big endian machines, and so on. You can serialize structure on - 32-bit machine and unserialize it on 64-bit one (or vice versa), or - serialize it on SPARC and unserialize on x86. You can also - serialize it in C++ version of ALGLIB and unserialize in C# one, - and vice versa. +Same as PSpline2ParameterValues(), but for 3D. + + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey *************************************************************************/ -
    void kdtreeserialize(kdtree &obj, std::string &s_out); +
    void alglib::pspline3parametervalues( + pspline3interpolant p, + ae_int_t& n, + real_1d_array& t, + const xparams _params = alglib::xdefault); +
    - +
     
    /************************************************************************* -This function unserializes data structure from string. -*************************************************************************/ -
    void kdtreeunserialize(std::string &s_in, kdtree &obj); -
    - -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "alglibmisc.h"
    +This function  calculates  tangent vector for a given value of parameter T
     
    -using namespace alglib;
    +INPUT PARAMETERS:
    +    P   -   parametric spline interpolant
    +    T   -   point:
    +            * T in [0,1] corresponds to interval spanned by points
    +            * for non-periodic splines T<0 (or T>1) correspond to parts of
    +              the curve before the first (after the last) point
    +            * for periodic splines T<0 (or T>1) are projected  into  [0,1]
    +              by making T=T-floor(T).
     
    +OUTPUT PARAMETERS:
    +    X    -   X-component of tangent vector (normalized)
    +    Y    -   Y-component of tangent vector (normalized)
    +    Z    -   Z-component of tangent vector (normalized)
     
    -int main(int argc, char **argv)
    -{
    -    real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
    -    ae_int_t nx = 2;
    -    ae_int_t ny = 0;
    -    ae_int_t normtype = 2;
    -    kdtree kdt;
    -    real_1d_array x;
    -    real_2d_array r = "[[]]";
    -    ae_int_t k;
    -    kdtreebuild(a, nx, ny, normtype, kdt);
    -    x = "[-1,0]";
    -    k = kdtreequeryknn(kdt, x, 1);
    -    printf("%d\n", int(k)); // EXPECTED: 1
    -    kdtreequeryresultsx(kdt, r);
    -    printf("%s\n", r.tostring(1).c_str()); // EXPECTED: [[0,0]]
    -    return 0;
    -}
    +NOTE:
    +    X^2+Y^2+Z^2 is either 1 (for non-zero tangent vector) or 0.
     
     
    -
    + -- ALGLIB PROJECT -- + Copyright 28.05.2010 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::pspline3tangent( + pspline3interpolant p, + double t, + double& x, + double& y, + double& z, + const xparams _params = alglib::xdefault); + +
    +
     #include "stdafx.h"
     #include <stdlib.h>
     #include <stdio.h>
     #include <math.h>
    -#include "alglibmisc.h"
    +#include "interpolation.h"
     
     using namespace alglib;
     
     
     int main(int argc, char **argv)
     {
    -    real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
    -    ae_int_t nx = 2;
    -    ae_int_t ny = 0;
    -    ae_int_t normtype = 2;
    -    kdtree kdt0;
    -    kdtree kdt1;
    -    std::string s;
    -    real_1d_array x;
    -    real_2d_array r0 = "[[]]";
    -    real_2d_array r1 = "[[]]";
    -
         //
    -    // Build tree and serialize it
+    // We use the RDP algorithm to approximate a parametric 2D curve given by
+    // locations at t=0,1,2,3 (see below), which form a piecewise linear
+    // trajectory through D-dimensional space (2-dimensional in our example).
    +    // 
    +    //     |
    +    //     |
    +    //     -     *     *     X2................X3
    +    //     |                .
    +    //     |               .
    +    //     -     *     *  .  *     *     *     *
    +    //     |             .
    +    //     |            .
    +    //     -     *     X1    *     *     *     *
    +    //     |      .....
    +    //     |  ....
    +    //     X0----|-----|-----|-----|-----|-----|---
         //
    -    kdtreebuild(a, nx, ny, normtype, kdt0);
    -    alglib::kdtreeserialize(kdt0, s);
    -    alglib::kdtreeunserialize(s, kdt1);
    +    ae_int_t npoints = 4;
    +    ae_int_t ndimensions = 2;
    +    real_2d_array x = "[[0,0],[2,1],[3,3],[6,3]]";
     
         //
    -    // Compare results from KNN queries
+    // The parametric curve is approximated by another parametric curve
+    // with fewer points. This allows working with a "compressed"
+    // representation, which needs less memory. Say, in our example
+    // (where we allow a per-point error below 0.8) the approximation has
+    // just two sequential sections, connecting X0 with X2 and X2 with X3.
    +    // 
    +    //     |
    +    //     |
    +    //     -     *     *     X2................X3
    +    //     |               . 
    +    //     |             .  
    +    //     -     *     .     *     *     *     *
    +    //     |         .    
    +    //     |       .     
    +    //     -     .     X1    *     *     *     *
    +    //     |   .       
    +    //     | .    
    +    //     X0----|-----|-----|-----|-----|-----|---
         //
    -    x = "[-1,0]";
    -    kdtreequeryknn(kdt0, x, 1);
    -    kdtreequeryresultsx(kdt0, r0);
    -    kdtreequeryknn(kdt1, x, 1);
    -    kdtreequeryresultsx(kdt1, r1);
    -    printf("%s\n", r0.tostring(1).c_str()); // EXPECTED: [[0,0]]
    -    printf("%s\n", r1.tostring(1).c_str()); // EXPECTED: [[0,0]]
    +    //
    +    real_2d_array y;
    +    integer_1d_array idxy;
    +    ae_int_t nsections;
    +    ae_int_t limitcnt = 0;
    +    double limiteps = 0.8;
    +    parametricrdpfixed(x, npoints, ndimensions, limitcnt, limiteps, y, idxy, nsections);
    +    printf("%d\n", int(nsections)); // EXPECTED: 2
    +    printf("%s\n", idxy.tostring().c_str()); // EXPECTED: [0,2,3]
         return 0;
     }
     
     
    -
    - - -
    -
    /************************************************************************* - -*************************************************************************/ -
    class nleqreport -{ - ae_int_t iterationscount; - ae_int_t nfunc; - ae_int_t njac; - ae_int_t terminationtype; -}; - -
    - -
    -
    /************************************************************************* - -*************************************************************************/ -
    class nleqstate -{ -}; - -
    - -
    -
    /************************************************************************* - LEVENBERG-MARQUARDT-LIKE NONLINEAR SOLVER - -DESCRIPTION: -This algorithm solves system of nonlinear equations - F[0](x[0], ..., x[n-1]) = 0 - F[1](x[0], ..., x[n-1]) = 0 - ... - F[M-1](x[0], ..., x[n-1]) = 0 -with M/N do not necessarily coincide. Algorithm converges quadratically -under following conditions: - * the solution set XS is nonempty - * for some xs in XS there exist such neighbourhood N(xs) that: - * vector function F(x) and its Jacobian J(x) are continuously - differentiable on N - * ||F(x)|| provides local error bound on N, i.e. there exists such - c1, that ||F(x)||>c1*distance(x,XS) -Note that these conditions are much more weaker than usual non-singularity -conditions. For example, algorithm will converge for any affine function -F (whether its Jacobian singular or not). - - -REQUIREMENTS: -Algorithm will request following information during its operation: -* function vector F[] and Jacobian matrix at given point X -* value of merit function f(x)=F[0]^2(x)+...+F[M-1]^2(x) at given point X - - -USAGE: -1. User initializes algorithm state with NLEQCreateLM() call -2. User tunes solver parameters with NLEQSetCond(), NLEQSetStpMax() and - other functions -3. User calls NLEQSolve() function which takes algorithm state and - pointers (delegates, etc.) to callback functions which calculate merit - function value and Jacobian. -4. User calls NLEQResults() to get solution -5. Optionally, user may call NLEQRestartFrom() to solve another problem - with same parameters (N/M) but another starting point and/or another - function vector. NLEQRestartFrom() allows to reuse already initialized - structure. - - -INPUT PARAMETERS: - N - space dimension, N>1: - * if provided, only leading N elements of X are used - * if not provided, determined automatically from size of X - M - system size - X - starting point - - -OUTPUT PARAMETERS: - State - structure which stores algorithm state - - -NOTES: -1. you may tune stopping conditions with NLEQSetCond() function -2. if target function contains exp() or other fast growing functions, and - optimization algorithm makes too large steps which leads to overflow, - use NLEQSetStpMax() function to bound algorithm's steps. -3. this algorithm is a slightly modified implementation of the method - described in 'Levenberg-Marquardt method for constrained nonlinear - equations with strong local convergence properties' by Christian Kanzow - Nobuo Yamashita and Masao Fukushima and further developed in 'On the - convergence of a New Levenberg-Marquardt Method' by Jin-yan Fan and - Ya-Xiang Yuan. - - - -- ALGLIB -- - Copyright 20.08.2009 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::nleqcreatelm(ae_int_t m, real_1d_array x, nleqstate& state); -void alglib::nleqcreatelm( - ae_int_t n, - ae_int_t m, - real_1d_array x, - nleqstate& state); - -
    - -
    -
    /************************************************************************* -This subroutine restarts CG algorithm from new point. All optimization -parameters are left unchanged. - -This function allows to solve multiple optimization problems (which -must have same number of dimensions) without object reallocation penalty. - -INPUT PARAMETERS: - State - structure used for reverse communication previously - allocated with MinCGCreate call. - X - new starting point. - BndL - new lower bounds - BndU - new upper bounds - - -- ALGLIB -- - Copyright 30.07.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::nleqrestartfrom(nleqstate state, real_1d_array x); - -
    - + + +
     
    /************************************************************************* -NLEQ solver results +Principal components analysis + +This function builds orthogonal basis where first axis corresponds to +direction with maximum variance, second axis maximizes variance in the +subspace orthogonal to first axis and so on. + +This function builds FULL basis, i.e. returns N vectors corresponding to +ALL directions, no matter how informative. If you need just a few (say, +10 or 50) of the most important directions, you may find it faster to use +one of the reduced versions: +* pcatruncatedsubspace() - for subspace iteration based method + +It should be noted that, unlike LDA, PCA does not use class labels. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - State - algorithm state. + X - dataset, array[0..NPoints-1,0..NVars-1]. + matrix contains ONLY INDEPENDENT VARIABLES. + NPoints - dataset size, NPoints>=0 + NVars - number of independent variables, NVars>=1 OUTPUT PARAMETERS: - X - array[0..N-1], solution - Rep - optimization report: - * Rep.TerminationType completetion code: - * -4 ERROR: algorithm has converged to the - stationary point Xf which is local minimum of - f=F[0]^2+...+F[m-1]^2, but is not solution of - nonlinear system. - * 1 sqrt(f)<=EpsF. - * 5 MaxIts steps was taken - * 7 stopping conditions are too stringent, - further improvement is impossible - * Rep.IterationsCount contains iterations count - * NFEV countains number of function calculations - * ActiveConstraints contains number of active constraints + Info - return code: + * -4, if SVD subroutine haven't converged + * -1, if wrong parameters has been passed (NPoints<0, + NVars<1) + * 1, if task is solved + S2 - array[0..NVars-1]. variance values corresponding + to basis vectors. + V - array[0..NVars-1,0..NVars-1] + matrix, whose columns store basis vectors. -- ALGLIB -- - Copyright 20.08.2009 by Bochkanov Sergey + Copyright 25.08.2008 by Bochkanov Sergey *************************************************************************/ -
    void alglib::nleqresults( - nleqstate state, - real_1d_array& x, - nleqreport& rep); +
    void alglib::pcabuildbasis( + real_2d_array x, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t& info, + real_1d_array& s2, + real_2d_array& v, + const xparams _params = alglib::xdefault);
    - +
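A minimal sketch of a full-basis PCA call (assuming "dataanalysis.h"; the toy dataset is illustrative):

#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main()
{
    // five 2D points spread mostly along the x=y direction, so the
    // first basis vector should point roughly along (1,1)/sqrt(2)
    real_2d_array x = "[[1,1],[2,2.1],[3,2.9],[4,4.2],[5,5.0]]";
    ae_int_t info;
    real_1d_array s2;
    real_2d_array v;

    pcabuildbasis(x, 5, 2, info, s2, v);

    // s2 holds variances along the new axes (in descending order),
    // columns of v are the corresponding basis vectors
    printf("%d\n", int(info)); // 1 on success
    printf("%s\n", s2.tostring(3).c_str());
    printf("%s\n", v.tostring(3).c_str());
    return 0;
}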
     
    /************************************************************************* -NLEQ solver results +Principal components analysis -Buffered implementation of NLEQResults(), which uses pre-allocated buffer -to store X[]. If buffer size is too small, it resizes buffer. It is -intended to be used in the inner cycles of performance critical algorithms -where array reallocation penalty is too large to be ignored. +This function performs truncated PCA, i.e. returns just a few most important +directions. - -- ALGLIB -- - Copyright 20.08.2009 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::nleqresultsbuf( - nleqstate state, - real_1d_array& x, - nleqreport& rep); +Internally it uses iterative eigensolver which is very efficient when only +a minor fraction of full basis is required. Thus, if you need full basis, +it is better to use pcabuildbasis() function. -
    - -
    -
    /************************************************************************* -This function sets stopping conditions for the nonlinear solver +It should be noted that, unlike LDA, PCA does not use class labels. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - State - structure which stores algorithm state - EpsF - >=0 - The subroutine finishes its work if on k+1-th iteration - the condition ||F||<=EpsF is satisfied - MaxIts - maximum number of iterations. If MaxIts=0, the number of - iterations is unlimited. + X - dataset, array[0..NPoints-1,0..NVars-1]. + matrix contains ONLY INDEPENDENT VARIABLES. + NPoints - dataset size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + NNeeded - number of requested components, in [1,NVars] range; + this function is efficient only for NNeeded<<NVars. + Eps - desired precision of vectors returned; underlying + solver will stop iterations as soon as absolute error + in corresponding singular values reduces to roughly + eps*MAX(lambda[]), with lambda[] being array of eigen + values. + Zero value means that algorithm performs number of + iterations specified by maxits parameter, without + paying attention to precision. + MaxIts - number of iterations performed by subspace iteration + method. Zero value means that no limit on iteration + count is placed (eps-based stopping condition is used). -Passing EpsF=0 and MaxIts=0 simultaneously will lead to automatic -stopping criterion selection (small EpsF). -NOTES: +OUTPUT PARAMETERS: + S2 - array[NNeeded]. Variance values corresponding + to basis vectors. + V - array[NVars,NNeeded] + matrix, whose columns store basis vectors. + +NOTE: passing eps=0 and maxits=0 results in small eps being selected as +stopping condition. Exact value of automatically selected eps is version- +-dependent. -- ALGLIB -- - Copyright 20.08.2010 by Bochkanov Sergey + Copyright 10.01.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::nleqsetcond(nleqstate state, double epsf, ae_int_t maxits); +
    void alglib::pcatruncatedsubspace( + real_2d_array x, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t nneeded, + double eps, + ae_int_t maxits, + real_1d_array& s2, + real_2d_array& v, + const xparams _params = alglib::xdefault);
    - +
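A sketch of the truncated variant (assuming "dataanalysis.h"; dataset and parameters are illustrative). Passing eps=0 and maxits=0 leaves the stopping criteria to the solver, as described above:

#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main()
{
    // same toy dataset, but only the single most important direction
    // (nneeded=1) is requested
    real_2d_array x = "[[1,1],[2,2.1],[3,2.9],[4,4.2],[5,5.0]]";
    real_1d_array s2;
    real_2d_array v;

    pcatruncatedsubspace(x, 5, 2, 1, 0.0, 0, s2, v);

    printf("%s\n", s2.tostring(3).c_str()); // variance along the first axis
    printf("%s\n", v.tostring(3).c_str());  // 2x1 matrix holding that axis
    return 0;
}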
     
    /************************************************************************* -This function sets maximum step length +Sparse truncated principal components analysis -INPUT PARAMETERS: - State - structure which stores algorithm state - StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't - want to limit step length. +This function performs sparse truncated PCA, i.e. returns just a few most +important principal components for a sparse input X. -Use this subroutine when target function contains exp() or other fast -growing functions, and algorithm makes too large steps which lead to -overflow. This function allows us to reject steps that are too large (and -therefore expose us to the possible overflow) without actually calculating -function value at the x+stp*d. +Internally it uses iterative eigensolver which is very efficient when only +a minor fraction of full basis is required. - -- ALGLIB -- - Copyright 20.08.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::nleqsetstpmax(nleqstate state, double stpmax); +It should be noted that, unlike LDA, PCA does not use class labels. -
    - -
    -
    /************************************************************************* -This function turns on/off reporting. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - State - structure which stores algorithm state - NeedXRep- whether iteration reports are needed or not - -If NeedXRep is True, algorithm will call rep() callback function if it is -provided to NLEQSolve(). + X - sparse dataset, sparse npoints*nvars matrix. It is + recommended to use CRS sparse storage format; non-CRS + input will be internally converted to CRS. + Matrix contains ONLY INDEPENDENT VARIABLES, and must + be EXACTLY npoints*nvars. + NPoints - dataset size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + NNeeded - number of requested components, in [1,NVars] range; + this function is efficient only for NNeeded<<NVars. + Eps - desired precision of vectors returned; underlying + solver will stop iterations as soon as absolute error + in corresponding singular values reduces to roughly + eps*MAX(lambda[]), with lambda[] being array of eigen + values. + Zero value means that algorithm performs number of + iterations specified by maxits parameter, without + paying attention to precision. + MaxIts - number of iterations performed by subspace iteration + method. Zero value means that no limit on iteration + count is placed (eps-based stopping condition is used). - -- ALGLIB -- - Copyright 20.08.2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::nleqsetxrep(nleqstate state, bool needxrep); -
    - -
    -
    /************************************************************************* -This family of functions is used to launcn iterations of nonlinear solver +OUTPUT PARAMETERS: + S2 - array[NNeeded]. Variance values corresponding + to basis vectors. + V - array[NVars,NNeeded] + matrix, whose columns store basis vectors. -These functions accept following parameters: - state - algorithm state - func - callback which calculates function (or merit function) - value func at given point x - jac - callback which calculates function vector fi[] - and Jacobian jac at given point x - rep - optional callback which is called after each iteration - can be NULL - ptr - optional pointer which is passed to func/grad/hess/jac/rep - can be NULL +NOTE: passing eps=0 and maxits=0 results in small eps being selected as + a stopping condition. Exact value of automatically selected eps is + version-dependent. +NOTE: zero MaxIts is silently replaced by some reasonable value which + prevents eternal loops (possible when inputs are degenerate and too + stringent stopping criteria are specified). In current version it + is 50+2*NVars. -- ALGLIB -- - Copyright 20.03.2009 by Bochkanov Sergey + Copyright 10.01.2017 by Bochkanov Sergey *************************************************************************/ -
    void nleqsolve(nleqstate &state, - void (*func)(const real_1d_array &x, double &func, void *ptr), - void (*jac)(const real_1d_array &x, real_1d_array &fi, real_2d_array &jac, void *ptr), - void (*rep)(const real_1d_array &x, double func, void *ptr) = NULL, - void *ptr = NULL); +
    void alglib::pcatruncatedsubspacesparse( + sparsematrix x, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t nneeded, + double eps, + ae_int_t maxits, + real_1d_array& s2, + real_2d_array& v, + const xparams _params = alglib::xdefault); +
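A minimal usage sketch for the interface documented above, assuming the sparse-matrix helpers sparsecreate()/sparseset()/sparseconverttocrs() and the dataanalysis.h header; the dataset contents and the requested component count are illustrative.

#include <stdio.h>
#include "linalg.h"
#include "dataanalysis.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Small sparse dataset: 4 observations of 3 variables.
    sparsematrix x;
    sparsecreate(4, 3, x);
    sparseset(x, 0, 0,  1.0);
    sparseset(x, 1, 0, -1.0);
    sparseset(x, 2, 1,  2.0);
    sparseset(x, 3, 1, -2.0);
    sparseconverttocrs(x);              // CRS storage is recommended above

    // Request one principal component; eps=0 and maxits=0 select the
    // automatic stopping condition described above.
    real_1d_array s2;
    real_2d_array v;
    pcatruncatedsubspacesparse(x, 4, 3, 1, 0.0, 0, s2, v);

    printf("%s\n", s2.tostring(3).c_str());   // variance of the component
    printf("%s\n", v.tostring(3).c_str());    // basis vector (one column)
    return 0;
}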
    - + - -
    -
    /************************************************************************* -Error function - -The integral is - - x - - - 2 | | 2 - erf(x) = -------- | exp( - t ) dt. - sqrt(pi) | | - - - 0 - -For 0 <= |x| < 1, erf(x) = x * P4(x**2)/Q5(x**2); otherwise -erf(x) = 1 - erfc(x). - - -ACCURACY: - - Relative error: -arithmetic domain # trials peak rms - IEEE 0,1 30000 3.7e-16 1.0e-16 - -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier -*************************************************************************/ -
    double alglib::errorfunction(double x); - -
    - +
     
    /************************************************************************* -Complementary error function - - 1 - erf(x) = - - inf. - - - 2 | | 2 - erfc(x) = -------- | exp( - t ) dt - sqrt(pi) | | - - - x +Inverse Poisson distribution +Finds the Poisson variable x such that the integral +from 0 to x of the Poisson density is equal to the +given probability y. -For small x, erfc(x) = 1 - erf(x); otherwise rational -approximations are computed. +This is accomplished using the inverse gamma integral +function and the relation + m = igami( k+1, y ). ACCURACY: - Relative error: -arithmetic domain # trials peak rms - IEEE 0,26.6417 30000 5.7e-14 1.5e-14 +See inverse incomplete gamma function Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier +Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::errorfunctionc(double x); +
    double alglib::invpoissondistribution( + ae_int_t k, + double y, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Inverse of the error function +Complemented Poisson distribution -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier -*************************************************************************/ -
    double alglib::inverf(double e); +Returns the sum of the terms k+1 to infinity of the Poisson +distribution: -
    - -
    -
    /************************************************************************* -Inverse of Normal distribution function + inf. j + -- -m m + > e -- + -- j! + j=k+1 -Returns the argument, x, for which the area under the -Gaussian probability density function (integrated from -minus infinity to x) is equal to y. +The terms are not summed directly; instead the incomplete +gamma integral is employed, according to the formula +y = pdtrc( k, m ) = igam( k+1, m ). -For small arguments 0 < y < exp(-2), the program computes -z = sqrt( -2.0 * log(y) ); then the approximation is -x = z - log(z)/z - (1/z) P(1/z) / Q(1/z). -There are two rational functions P/Q, one for 0 < y < exp(-32) -and the other for y up to exp(-2). For larger arguments, -w = y - 0.5, and x/sqrt(2pi) = w + w**3 R(w**2)/S(w**2)). +The arguments must both be positive. ACCURACY: - Relative error: -arithmetic domain # trials peak rms - IEEE 0.125, 1 20000 7.2e-16 1.3e-16 - IEEE 3e-308, 0.135 50000 4.6e-16 9.8e-17 +See incomplete gamma function Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier +Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::invnormaldistribution(double y0); +
    double alglib::poissoncdistribution( + ae_int_t k, + double m, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Normal distribution function - -Returns the area under the Gaussian probability density -function, integrated from minus infinity to x: +Poisson distribution - x - - - 1 | | 2 - ndtr(x) = --------- | exp( - t /2 ) dt - sqrt(2pi) | | - - - -inf. +Returns the sum of the first k+1 terms of the Poisson +distribution: - = ( 1 + erf(z) ) / 2 - = erfc(z) / 2 + k j + -- -m m + > e -- + -- j! + j=0 -where z = x/sqrt(2). Computation is via the functions -erf and erfc. +The terms are not summed directly; instead the incomplete +gamma integral is employed, according to the relation +y = pdtr( k, m ) = igamc( k+1, m ). +The arguments must both be positive. ACCURACY: - Relative error: -arithmetic domain # trials peak rms - IEEE -13,0 30000 3.4e-14 6.7e-15 +See incomplete gamma function Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1988, 1992, 2000 by Stephen L. Moshier +Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::normaldistribution(double x); +
    double alglib::poissondistribution( + ae_int_t k, + double m, + const xparams _params = alglib::xdefault);
    - +
    - -normestimatorstate
    -normestimatorcreate
    -normestimatorestimatesparse
    -normestimatorresults
    -normestimatorsetseed
    +polynomialbar2cheb
    +polynomialbar2pow
    +polynomialbuild
    +polynomialbuildcheb1
    +polynomialbuildcheb2
    +polynomialbuildeqdist
    +polynomialcalccheb1
    +polynomialcalccheb2
    +polynomialcalceqdist
    +polynomialcheb2bar
    +polynomialpow2bar
    + + +
    polint_d_calcdiff Interpolation and differentiation using barycentric representation
    polint_d_conv Conversion between power basis and barycentric representation
    polint_d_spec Polynomial interpolation on special grids (equidistant, Chebyshev I/II)
    - -
    -
    /************************************************************************* -This object stores state of the iterative norm estimation algorithm. - -You should use ALGLIB functions to work with this object. -*************************************************************************/ -
    class normestimatorstate -{ -}; - -
    - +
     
    /************************************************************************* -This procedure initializes matrix norm estimator. - -USAGE: -1. User initializes algorithm state with NormEstimatorCreate() call -2. User calls NormEstimatorEstimateSparse() (or NormEstimatorIteration()) -3. User calls NormEstimatorResults() to get solution. +Conversion from barycentric representation to Chebyshev basis. +This function has O(N^2) complexity. INPUT PARAMETERS: - M - number of rows in the matrix being estimated, M>0 - N - number of columns in the matrix being estimated, N>0 - NStart - number of random starting vectors - recommended value - at least 5. - NIts - number of iterations to do with best starting vector - recommended value - at least 5. - -OUTPUT PARAMETERS: - State - structure which stores algorithm state - + P - polynomial in barycentric form + A,B - base interval for Chebyshev polynomials (see below) + A<>B -NOTE: this algorithm is effectively deterministic, i.e. it always returns -same result when repeatedly called for the same matrix. In fact, algorithm -uses randomized starting vectors, but internal random numbers generator -always generates same sequence of the random values (it is a feature, not -bug). +OUTPUT PARAMETERS + T - coefficients of Chebyshev representation; + P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N-1 }, + where Ti - I-th Chebyshev polynomial. -Algorithm can be made non-deterministic with NormEstimatorSetSeed(0) call. +NOTES: + barycentric interpolant passed as P may be either polynomial obtained + from polynomial interpolation/ fitting or rational function which is + NOT polynomial. We can't distinguish between these two cases, and this + algorithm just tries to work assuming that P IS a polynomial. If not, + algorithm will return results, but they won't have any meaning. -- ALGLIB -- - Copyright 06.12.2011 by Bochkanov Sergey + Copyright 30.09.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::normestimatorcreate( - ae_int_t m, - ae_int_t n, - ae_int_t nstart, - ae_int_t nits, - normestimatorstate& state); +
    void alglib::polynomialbar2cheb( + barycentricinterpolant p, + double a, + double b, + real_1d_array& t, + const xparams _params = alglib::xdefault);
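A small sketch of the conversion described above, assuming interpolation.h: a barycentric model of y=x^2 is built with polynomialbuild() and then expanded into Chebyshev coefficients on [-1,+1].

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Interpolate y=x^2 on three nodes, then convert to Chebyshev basis.
    real_1d_array x = "[-1,0,1]";
    real_1d_array y = "[1,0,1]";
    barycentricinterpolant p;
    polynomialbuild(x, y, p);

    real_1d_array t;
    polynomialbar2cheb(p, -1.0, +1.0, t);

    // Since x^2 = 0.5*T0(x) + 0.5*T2(x), the coefficients should be
    // approximately [0.5, 0, 0.5].
    printf("%s\n", t.tostring(4).c_str());
    return 0;
}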
    - +
     
    /************************************************************************* -This function estimates norm of the sparse M*N matrix A. +Conversion from barycentric representation to power basis. +This function has O(N^2) complexity. INPUT PARAMETERS: - State - norm estimator state, must be initialized with a call - to NormEstimatorCreate() - A - sparse M*N matrix, must be converted to CRS format - prior to calling this function. + P - polynomial in barycentric form + C - offset (see below); 0.0 is used as default value. + S - scale (see below); 1.0 is used as default value. S<>0. -After this function is over you can call NormEstimatorResults() to get -estimate of the norm(A). +OUTPUT PARAMETERS + A - coefficients, P(x) = sum { A[i]*((X-C)/S)^i, i=0..N-1 } + N - number of coefficients (polynomial degree plus 1) - -- ALGLIB -- - Copyright 06.12.2011 by Bochkanov Sergey -*************************************************************************/ -
void alglib::normestimatorestimatesparse( - normestimatorstate state, - sparsematrix a); +NOTES: +1. this function accepts offset and scale, which can be set to improve + numerical properties of polynomial. For example, if P was obtained as + result of interpolation on [-1,+1], you can set C=0 and S=1 and + represent P as sum of 1, x, x^2, x^3 and so on. In most cases this + is exactly what you need. -
    - -
    -
    /************************************************************************* -Matrix norm estimation results + However, if your interpolation model was built on [999,1001], you will + see significant growth of numerical errors when using {1, x, x^2, x^3} + as basis. Representing P as sum of 1, (x-1000), (x-1000)^2, (x-1000)^3 + will be better option. Such representation can be obtained by using + 1000.0 as offset C and 1.0 as scale S. -INPUT PARAMETERS: - State - algorithm state +2. power basis is ill-conditioned and tricks described above can't solve + this problem completely. This function will return coefficients in + any case, but for N>8 they will become unreliable. However, N's + less than 5 are pretty safe. -OUTPUT PARAMETERS: - Nrm - estimate of the matrix norm, Nrm>=0 +3. barycentric interpolant passed as P may be either polynomial obtained + from polynomial interpolation/ fitting or rational function which is + NOT polynomial. We can't distinguish between these two cases, and this + algorithm just tries to work assuming that P IS a polynomial. If not, + algorithm will return results, but they won't have any meaning. -- ALGLIB -- - Copyright 06.12.2011 by Bochkanov Sergey + Copyright 30.09.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::normestimatorresults(normestimatorstate state, double& nrm); +
    void alglib::polynomialbar2pow( + barycentricinterpolant p, + real_1d_array& a, + const xparams _params = alglib::xdefault); +void alglib::polynomialbar2pow( + barycentricinterpolant p, + double c, + double s, + real_1d_array& a, + const xparams _params = alglib::xdefault);
    - +

Examples: [1]

    +
     
/************************************************************************* -This function changes seed value used by algorithm. In some cases we need -deterministic processing, i.e. subsequent calls must return equal results, -in other cases we need non-deterministic algorithm which returns different -results for the same matrix on every pass. - -Setting zero seed will lead to non-deterministic algorithm, while non-zero -value will make our algorithm deterministic. +Lagrange interpolant: generation of the model on the general grid. +This function has O(N^2) complexity. INPUT PARAMETERS: - State - norm estimator state, must be initialized with a call - to NormEstimatorCreate() - SeedVal - seed value, >=0. Zero value = non-deterministic algo. - - -- ALGLIB -- - Copyright 06.12.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::normestimatorsetseed( - normestimatorstate state, - ae_int_t seedval); - -
    - -
    - -odesolverreport
    -odesolverstate
    - -odesolverresults
    -odesolverrkck
    -odesolversolve
    - - - -
    odesolver_d1 Solving y'=-y with ODE solver
    - -
    -
    /************************************************************************* - -*************************************************************************/ -
    class odesolverreport -{ - ae_int_t nfev; - ae_int_t terminationtype; -}; + X - abscissas, array[0..N-1] + Y - function values, array[0..N-1] + N - number of points, N>=1 -
    - -
    -
    /************************************************************************* +OUTPUT PARAMETERS + P - barycentric model which represents Lagrange interpolant + (see ratint unit info and BarycentricCalc() description for + more information). + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -
    class odesolverstate -{ -}; +
    void alglib::polynomialbuild( + real_1d_array x, + real_1d_array y, + barycentricinterpolant& p, + const xparams _params = alglib::xdefault); +void alglib::polynomialbuild( + real_1d_array x, + real_1d_array y, + ae_int_t n, + barycentricinterpolant& p, + const xparams _params = alglib::xdefault);
    - +

Examples: [1]

    +
     
    /************************************************************************* -ODE solver results - -Called after OdeSolverIteration returned False. +Lagrange intepolant on Chebyshev grid (first kind). +This function has O(N) complexity. INPUT PARAMETERS: - State - algorithm state (used by OdeSolverIteration). + A - left boundary of [A,B] + B - right boundary of [A,B] + Y - function values at the nodes, array[0..N-1], + Y[I] = Y(0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n))) + N - number of points, N>=1 + for N=1 a constant model is constructed. -OUTPUT PARAMETERS: - M - number of tabulated values, M>=1 - XTbl - array[0..M-1], values of X - YTbl - array[0..M-1,0..N-1], values of Y in X[i] - Rep - solver report: - * Rep.TerminationType completetion code: - * -2 X is not ordered by ascending/descending or - there are non-distinct X[], i.e. X[i]=X[i+1] - * -1 incorrect parameters were specified - * 1 task has been solved - * Rep.NFEV contains number of function calculations +OUTPUT PARAMETERS + P - barycentric model which represents Lagrange interpolant + (see ratint unit info and BarycentricCalc() description for + more information). -- ALGLIB -- - Copyright 01.09.2009 by Bochkanov Sergey + Copyright 03.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::odesolverresults( - odesolverstate state, - ae_int_t& m, - real_1d_array& xtbl, - real_2d_array& ytbl, - odesolverreport& rep); +
    void alglib::polynomialbuildcheb1( + double a, + double b, + real_1d_array y, + barycentricinterpolant& p, + const xparams _params = alglib::xdefault); +void alglib::polynomialbuildcheb1( + double a, + double b, + real_1d_array y, + ae_int_t n, + barycentricinterpolant& p, + const xparams _params = alglib::xdefault);
    -

Examples: [1]

    - +

Examples: [1]

    +
     
    /************************************************************************* -Cash-Karp adaptive ODE solver. - -This subroutine solves ODE Y'=f(Y,x) with initial conditions Y(xs)=Ys -(here Y may be single variable or vector of N variables). +Lagrange intepolant on Chebyshev grid (second kind). +This function has O(N) complexity. INPUT PARAMETERS: - Y - initial conditions, array[0..N-1]. - contains values of Y[] at X[0] - N - system size - X - points at which Y should be tabulated, array[0..M-1] - integrations starts at X[0], ends at X[M-1], intermediate - values at X[i] are returned too. - SHOULD BE ORDERED BY ASCENDING OR BY DESCENDING! - M - number of intermediate points + first point + last point: - * M>2 means that you need both Y(X[M-1]) and M-2 values at - intermediate points - * M=2 means that you want just to integrate from X[0] to - X[1] and don't interested in intermediate values. - * M=1 means that you don't want to integrate :) - it is degenerate case, but it will be handled correctly. - * M<1 means error - Eps - tolerance (absolute/relative error on each step will be - less than Eps). When passing: - * Eps>0, it means desired ABSOLUTE error - * Eps<0, it means desired RELATIVE error. Relative errors - are calculated with respect to maximum values of Y seen - so far. Be careful to use this criterion when starting - from Y[] that are close to zero. - H - initial step lenth, it will be adjusted automatically - after the first step. If H=0, step will be selected - automatically (usualy it will be equal to 0.001 of - min(x[i]-x[j])). + A - left boundary of [A,B] + B - right boundary of [A,B] + Y - function values at the nodes, array[0..N-1], + Y[I] = Y(0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1))) + N - number of points, N>=1 + for N=1 a constant model is constructed. OUTPUT PARAMETERS - State - structure which stores algorithm state between subsequent - calls of OdeSolverIteration. Used for reverse communication. - This structure should be passed to the OdeSolverIteration - subroutine. - -SEE ALSO - AutoGKSmoothW, AutoGKSingular, AutoGKIteration, AutoGKResults. - + P - barycentric model which represents Lagrange interpolant + (see ratint unit info and BarycentricCalc() description for + more information). -- ALGLIB -- - Copyright 01.09.2009 by Bochkanov Sergey + Copyright 03.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::odesolverrkck( +
    void alglib::polynomialbuildcheb2( + double a, + double b, real_1d_array y, - real_1d_array x, - double eps, - double h, - odesolverstate& state); -void alglib::odesolverrkck( + barycentricinterpolant& p, + const xparams _params = alglib::xdefault); +void alglib::polynomialbuildcheb2( + double a, + double b, real_1d_array y, ae_int_t n, - real_1d_array x, - ae_int_t m, - double eps, - double h, - odesolverstate& state); + barycentricinterpolant& p, + const xparams _params = alglib::xdefault);
    -

Examples: [1]

    - +

Examples: [1]

    +
     
/************************************************************************* -This function is used to launcn iterations of ODE solver +Lagrange interpolant: generation of the model on equidistant grid. +This function has O(N) complexity. -It accepts following parameters: - diff - callback which calculates dy/dx for given y and x - ptr - optional pointer which is passed to diff; can be NULL +INPUT PARAMETERS: + A - left boundary of [A,B] + B - right boundary of [A,B] + Y - function values at the nodes, array[0..N-1] + N - number of points, N>=1 + for N=1 a constant model is constructed. +OUTPUT PARAMETERS + P - barycentric model which represents Lagrange interpolant + (see ratint unit info and BarycentricCalc() description for + more information). -- ALGLIB -- - Copyright 01.09.2009 by Bochkanov Sergey + Copyright 03.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void odesolversolve(odesolverstate &state, - void (*diff)(const real_1d_array &y, double x, real_1d_array &dy, void *ptr), - void *ptr = NULL); +
    void alglib::polynomialbuildeqdist( + double a, + double b, + real_1d_array y, + barycentricinterpolant& p, + const xparams _params = alglib::xdefault); +void alglib::polynomialbuildeqdist( + double a, + double b, + real_1d_array y, + ae_int_t n, + barycentricinterpolant& p, + const xparams _params = alglib::xdefault); +
    -

Examples: [1]

    - +

Examples: [1]

    +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "diffequations.h"
    +
    /************************************************************************* +Fast polynomial interpolation function on Chebyshev points (first kind) +with O(N) complexity. -using namespace alglib; -void ode_function_1_diff(const real_1d_array &y, double x, real_1d_array &dy, void *ptr) -{ - // this callback calculates f(y[],x)=-y[0] - dy[0] = -y[0]; -} +INPUT PARAMETERS: + A - left boundary of [A,B] + B - right boundary of [A,B] + F - function values, array[0..N-1] + N - number of points on Chebyshev grid (first kind), + X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n)) + for N=1 a constant model is constructed. + T - position where P(x) is calculated -int main(int argc, char **argv) -{ - real_1d_array y = "[1]"; - real_1d_array x = "[0, 1, 2, 3]"; - double eps = 0.00001; - double h = 0; - odesolverstate s; - ae_int_t m; - real_1d_array xtbl; - real_2d_array ytbl; - odesolverreport rep; - odesolverrkck(y, x, eps, h, s); - alglib::odesolversolve(s, ode_function_1_diff); - odesolverresults(s, m, xtbl, ytbl, rep); - printf("%d\n", int(m)); // EXPECTED: 4 - printf("%s\n", xtbl.tostring(2).c_str()); // EXPECTED: [0, 1, 2, 3] - printf("%s\n", ytbl.tostring(2).c_str()); // EXPECTED: [[1], [0.367], [0.135], [0.050]] - return 0; -} +RESULT + value of the Lagrange interpolant at T +IMPORTANT + this function provides fast interface which is not overflow-safe + nor it is very precise. + the best option is to use PolIntBuildCheb1()/BarycentricCalc() + subroutines unless you are pretty sure that your data will not result + in overflow. -
    - - + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::polynomialcalccheb1( + double a, + double b, + real_1d_array f, + double t, + const xparams _params = alglib::xdefault); +double alglib::polynomialcalccheb1( + double a, + double b, + real_1d_array f, + ae_int_t n, + double t, + const xparams _params = alglib::xdefault); + +
    +

Examples: [1]

    +
     
    /************************************************************************* -LQ decomposition of a rectangular complex matrix of size MxN - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that QP decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=512, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +Fast polynomial interpolation function on Chebyshev points (second kind) +with O(N) complexity. -Input parameters: - A - matrix A whose indexes range within [0..M-1, 0..N-1] - M - number of rows in matrix A. - N - number of columns in matrix A. +INPUT PARAMETERS: + A - left boundary of [A,B] + B - right boundary of [A,B] + F - function values, array[0..N-1] + N - number of points on Chebyshev grid (second kind), + X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1)) + for N=1 a constant model is constructed. + T - position where P(x) is calculated -Output parameters: - A - matrices Q and L in compact form - Tau - array of scalar factors which are used to form matrix Q. Array - whose indexes range within [0.. Min(M,N)-1] +RESULT + value of the Lagrange interpolant at T -Matrix A is represented as A = LQ, where Q is an orthogonal matrix of size -MxM, L - lower triangular (or lower trapezoid) matrix of size MxN. +IMPORTANT + this function provides fast interface which is not overflow-safe + nor it is very precise. + the best option is to use PolIntBuildCheb2()/BarycentricCalc() + subroutines unless you are pretty sure that your data will not result + in overflow. - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixlq( - complex_2d_array& a, - ae_int_t m, - ae_int_t n, - complex_1d_array& tau); -void alglib::smp_cmatrixlq( - complex_2d_array& a, - ae_int_t m, +
    double alglib::polynomialcalccheb2( + double a, + double b, + real_1d_array f, + double t, + const xparams _params = alglib::xdefault); +double alglib::polynomialcalccheb2( + double a, + double b, + real_1d_array f, ae_int_t n, - complex_1d_array& tau); + double t, + const xparams _params = alglib::xdefault);
    - +

Examples: [1]

    +
     
    /************************************************************************* -Unpacking of matrix L from the LQ decomposition of a matrix A +Fast equidistant polynomial interpolation function with O(N) complexity -Input parameters: - A - matrices Q and L in compact form. - Output of CMatrixLQ subroutine. - M - number of rows in given matrix A. M>=0. - N - number of columns in given matrix A. N>=0. +INPUT PARAMETERS: + A - left boundary of [A,B] + B - right boundary of [A,B] + F - function values, array[0..N-1] + N - number of points on equidistant grid, N>=1 + for N=1 a constant model is constructed. + T - position where P(x) is calculated -Output parameters: - L - matrix L, array[0..M-1, 0..N-1]. +RESULT + value of the Lagrange interpolant at T - -- ALGLIB routine -- - 17.02.2010 - Bochkanov Sergey +IMPORTANT + this function provides fast interface which is not overflow-safe + nor it is very precise. + the best option is to use PolynomialBuildEqDist()/BarycentricCalc() + subroutines unless you are pretty sure that your data will not result + in overflow. + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixlqunpackl( - complex_2d_array a, - ae_int_t m, +
    double alglib::polynomialcalceqdist( + double a, + double b, + real_1d_array f, + double t, + const xparams _params = alglib::xdefault); +double alglib::polynomialcalceqdist( + double a, + double b, + real_1d_array f, ae_int_t n, - complex_2d_array& l); + double t, + const xparams _params = alglib::xdefault);
    - +

Examples: [1]

    +
     
    /************************************************************************* -Partial unpacking of matrix Q from LQ decomposition of a complex matrix A. - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that QP decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=512, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +Conversion from Chebyshev basis to barycentric representation. +This function has O(N^2) complexity. -Input parameters: - A - matrices Q and R in compact form. - Output of CMatrixLQ subroutine . - M - number of rows in matrix A. M>=0. - N - number of columns in matrix A. N>=0. - Tau - scalar factors which are used to form Q. - Output of CMatrixLQ subroutine . - QRows - required number of rows in matrix Q. N>=QColumns>=0. +INPUT PARAMETERS: + T - coefficients of Chebyshev representation; + P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N }, + where Ti - I-th Chebyshev polynomial. + N - number of coefficients: + * if given, only leading N elements of T are used + * if not given, automatically determined from size of T + A,B - base interval for Chebyshev polynomials (see above) + A<B -Output parameters: - Q - first QRows rows of matrix Q. - Array whose index ranges within [0..QRows-1, 0..N-1]. - If QRows=0, array isn't changed. +OUTPUT PARAMETERS + P - polynomial in barycentric form - -- ALGLIB routine -- - 17.02.2010 - Bochkanov Sergey + -- ALGLIB -- + Copyright 30.09.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixlqunpackq( - complex_2d_array a, - ae_int_t m, - ae_int_t n, - complex_1d_array tau, - ae_int_t qrows, - complex_2d_array& q); -void alglib::smp_cmatrixlqunpackq( - complex_2d_array a, - ae_int_t m, +
    void alglib::polynomialcheb2bar( + real_1d_array t, + double a, + double b, + barycentricinterpolant& p, + const xparams _params = alglib::xdefault); +void alglib::polynomialcheb2bar( + real_1d_array t, ae_int_t n, - complex_1d_array tau, - ae_int_t qrows, - complex_2d_array& q); + double a, + double b, + barycentricinterpolant& p, + const xparams _params = alglib::xdefault);
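A companion sketch for the reverse direction (Chebyshev coefficients into a barycentric model), again assuming interpolation.h; the coefficient vector encodes x^2 on [-1,+1].

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Chebyshev representation of x^2 on [-1,+1]: 0.5*T0 + 0.5*T2.
    real_1d_array t = "[0.5,0.0,0.5]";
    barycentricinterpolant p;
    polynomialcheb2bar(t, -1.0, +1.0, p);

    // Evaluate the resulting barycentric model: x^2 at x=0.5 is 0.25.
    printf("%.4f\n", barycentriccalc(p, 0.5));   // close to 0.25
    return 0;
}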
    - +
     
    /************************************************************************* -QR decomposition of a rectangular complex matrix of size MxN +Conversion from power basis to barycentric representation. +This function has O(N^2) complexity. -COMMERCIAL EDITION OF ALGLIB: +INPUT PARAMETERS: + A - coefficients, P(x) = sum { A[i]*((X-C)/S)^i, i=0..N-1 } + N - number of coefficients (polynomial degree plus 1) + * if given, only leading N elements of A are used + * if not given, automatically determined from size of A + C - offset (see below); 0.0 is used as default value. + S - scale (see below); 1.0 is used as default value. S<>0. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that QP decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=512, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +OUTPUT PARAMETERS + P - polynomial in barycentric form -Input parameters: - A - matrix A whose indexes range within [0..M-1, 0..N-1] - M - number of rows in matrix A. - N - number of columns in matrix A. -Output parameters: - A - matrices Q and R in compact form - Tau - array of scalar factors which are used to form matrix Q. Array - whose indexes range within [0.. Min(M,N)-1] +NOTES: +1. this function accepts offset and scale, which can be set to improve + numerical properties of polynomial. For example, if you interpolate on + [-1,+1], you can set C=0 and S=1 and convert from sum of 1, x, x^2, + x^3 and so on. In most cases you it is exactly what you need. -Matrix A is represented as A = QR, where Q is an orthogonal matrix of size -MxM, R - upper triangular (or upper trapezoid) matrix of size MxN. + However, if your interpolation model was built on [999,1001], you will + see significant growth of numerical errors when using {1, x, x^2, x^3} + as input basis. Converting from sum of 1, (x-1000), (x-1000)^2, + (x-1000)^3 will be better option (you have to specify 1000.0 as offset + C and 1.0 as scale S). + +2. 
power basis is ill-conditioned and tricks described above can't solve + this problem completely. This function will return barycentric model + in any case, but for N>8 accuracy will degrade. However, N's less than + 5 are pretty safe. - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 + -- ALGLIB -- + Copyright 30.09.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::cmatrixqr( - complex_2d_array& a, - ae_int_t m, - ae_int_t n, - complex_1d_array& tau); -void alglib::smp_cmatrixqr( - complex_2d_array& a, - ae_int_t m, +
    void alglib::polynomialpow2bar( + real_1d_array a, + barycentricinterpolant& p, + const xparams _params = alglib::xdefault); +void alglib::polynomialpow2bar( + real_1d_array a, ae_int_t n, - complex_1d_array& tau); + double c, + double s, + barycentricinterpolant& p, + const xparams _params = alglib::xdefault);
    - +

Examples: [1]

    +
    -
    /************************************************************************* -Partial unpacking of matrix Q from QR decomposition of a complex matrix A. - -COMMERCIAL EDITION OF ALGLIB: +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that QP decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=512, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +using namespace alglib; -Input parameters: - A - matrices Q and R in compact form. - Output of CMatrixQR subroutine . - M - number of rows in matrix A. M>=0. - N - number of columns in matrix A. N>=0. - Tau - scalar factors which are used to form Q. - Output of CMatrixQR subroutine . - QColumns - required number of columns in matrix Q. M>=QColumns>=0. -Output parameters: - Q - first QColumns columns of matrix Q. - Array whose index ranges within [0..M-1, 0..QColumns-1]. - If QColumns=0, array isn't changed. +int main(int argc, char **argv) +{ + // + // Here we demonstrate polynomial interpolation and differentiation + // of y=x^2-x sampled at [0,1,2]. Barycentric representation of polynomial is used. + // + real_1d_array x = "[0,1,2]"; + real_1d_array y = "[0,0,2]"; + double t = -1; + double v; + double dv; + double d2v; + barycentricinterpolant p; - -- ALGLIB routine -- - 17.02.2010 - Bochkanov Sergey -*************************************************************************/ -
    void alglib::cmatrixqrunpackq( - complex_2d_array a, - ae_int_t m, - ae_int_t n, - complex_1d_array tau, - ae_int_t qcolumns, - complex_2d_array& q); -void alglib::smp_cmatrixqrunpackq( - complex_2d_array a, - ae_int_t m, - ae_int_t n, - complex_1d_array tau, - ae_int_t qcolumns, - complex_2d_array& q); + // barycentric model is created + polynomialbuild(x, y, p); -
    - -
    -
    /************************************************************************* -Unpacking of matrix R from the QR decomposition of a matrix A + // barycentric interpolation is demonstrated + v = barycentriccalc(p, t); + printf("%.4f\n", double(v)); // EXPECTED: 2.0 -Input parameters: - A - matrices Q and R in compact form. - Output of CMatrixQR subroutine. - M - number of rows in given matrix A. M>=0. - N - number of columns in given matrix A. N>=0. + // barycentric differentation is demonstrated + barycentricdiff1(p, t, v, dv); + printf("%.4f\n", double(v)); // EXPECTED: 2.0 + printf("%.4f\n", double(dv)); // EXPECTED: -3.0 -Output parameters: - R - matrix R, array[0..M-1, 0..N-1]. + // second derivatives with barycentric representation + barycentricdiff1(p, t, v, dv); + printf("%.4f\n", double(v)); // EXPECTED: 2.0 + printf("%.4f\n", double(dv)); // EXPECTED: -3.0 + barycentricdiff2(p, t, v, dv, d2v); + printf("%.4f\n", double(v)); // EXPECTED: 2.0 + printf("%.4f\n", double(dv)); // EXPECTED: -3.0 + printf("%.4f\n", double(d2v)); // EXPECTED: 2.0 + return 0; +} - -- ALGLIB routine -- - 17.02.2010 - Bochkanov Sergey -*************************************************************************/ -
    void alglib::cmatrixqrunpackr( - complex_2d_array a, - ae_int_t m, - ae_int_t n, - complex_2d_array& r); -
    - +
    -
    /************************************************************************* -Reduction of a Hermitian matrix which is given by its higher or lower -triangular part to a real tridiagonal matrix using unitary similarity -transformation: Q'*A*Q = T. - +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -COMMERCIAL EDITION OF ALGLIB: +using namespace alglib; - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! Multithreaded acceleration is NOT supported for this function. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. -Input parameters: - A - matrix to be transformed - array with elements [0..N-1, 0..N-1]. - N - size of matrix A. - IsUpper - storage format. If IsUpper = True, then matrix A is given - by its upper triangle, and the lower triangle is not used - and not modified by the algorithm, and vice versa - if IsUpper = False. +int main(int argc, char **argv) +{ + // + // Here we demonstrate conversion of y=x^2-x + // between power basis and barycentric representation. + // + real_1d_array a = "[0,-1,+1]"; + double t = 2; + real_1d_array a2; + double v; + barycentricinterpolant p; -Output parameters: - A - matrices T and Q in compact form (see lower) - Tau - array of factors which are forming matrices H(i) - array with elements [0..N-2]. - D - main diagonal of real symmetric matrix T. - array with elements [0..N-1]. - E - secondary diagonal of real symmetric matrix T. - array with elements [0..N-2]. + // + // a=[0,-1,+1] is decomposition of y=x^2-x in the power basis: + // + // y = 0 - 1*x + 1*x^2 + // + // We convert it to the barycentric form. + // + polynomialpow2bar(a, p); + // now we have barycentric interpolation; we can use it for interpolation + v = barycentriccalc(p, t); + printf("%.2f\n", double(v)); // EXPECTED: 2.0 - If IsUpper=True, the matrix Q is represented as a product of elementary - reflectors + // we can also convert back from barycentric representation to power basis + polynomialbar2pow(p, a2); + printf("%s\n", a2.tostring(2).c_str()); // EXPECTED: [0,-1,+1] + return 0; +} - Q = H(n-2) . . . H(2) H(0). - Each H(i) has the form +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "interpolation.h"
     
    -     H(i) = I - tau * v * v'
    +using namespace alglib;
     
    -  where tau is a complex scalar, and v is a complex vector with
    -  v(i+1:n-1) = 0, v(i) = 1, v(0:i-1) is stored on exit in
    -  A(0:i-1,i+1), and tau in TAU(i).
     
    -  If IsUpper=False, the matrix Q is represented as a product of elementary
    -  reflectors
    +int main(int argc, char **argv)
    +{
    +    //
    +    // Temporaries:
    +    // * values of y=x^2-x sampled at three special grids:
+    //   * equidistant grid spanning [0,2],    x[i] = 2*i/(N-1), i=0..N-1
+    //   * Chebyshev-I grid spanning [-1,+1],  x[i] = Cos(PI*(2*i+1)/(2*n)), i=0..N-1
+    //   * Chebyshev-II grid spanning [-1,+1], x[i] = Cos(PI*i/(n-1)), i=0..N-1
    +    // * barycentric interpolants for these three grids
    +    // * vectors to store coefficients of quadratic representation
    +    //
    +    real_1d_array y_eqdist = "[0,0,2]";
    +    real_1d_array y_cheb1 = "[-0.116025,0.000000,1.616025]";
    +    real_1d_array y_cheb2 = "[0,0,2]";
    +    barycentricinterpolant p_eqdist;
    +    barycentricinterpolant p_cheb1;
    +    barycentricinterpolant p_cheb2;
    +    real_1d_array a_eqdist;
    +    real_1d_array a_cheb1;
    +    real_1d_array a_cheb2;
     
    -     Q = H(0) H(2) . . . H(n-2).
    +    //
    +    // First, we demonstrate construction of barycentric interpolants on
    +    // special grids. We unpack power representation to ensure that
    +    // interpolant was built correctly.
    +    //
    +    // In all three cases we should get same quadratic function.
    +    //
    +    polynomialbuildeqdist(0.0, 2.0, y_eqdist, p_eqdist);
    +    polynomialbar2pow(p_eqdist, a_eqdist);
    +    printf("%s\n", a_eqdist.tostring(4).c_str()); // EXPECTED: [0,-1,+1]
     
    -  Each H(i) has the form
    +    polynomialbuildcheb1(-1, +1, y_cheb1, p_cheb1);
    +    polynomialbar2pow(p_cheb1, a_cheb1);
    +    printf("%s\n", a_cheb1.tostring(4).c_str()); // EXPECTED: [0,-1,+1]
     
    -     H(i) = I - tau * v * v'
    +    polynomialbuildcheb2(-1, +1, y_cheb2, p_cheb2);
    +    polynomialbar2pow(p_cheb2, a_cheb2);
    +    printf("%s\n", a_cheb2.tostring(4).c_str()); // EXPECTED: [0,-1,+1]
     
    -  where tau is a complex scalar, and v is a complex vector with
    -  v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) is stored on exit in A(i+2:n-1,i),
    -  and tau in TAU(i).
    +    //
    +    // Now we demonstrate polynomial interpolation without construction 
    +    // of the barycentricinterpolant structure.
    +    //
    +    // We calculate interpolant value at x=-2.
    +    // In all three cases we should get same f=6
    +    //
    +    double t = -2;
    +    double v;
    +    v = polynomialcalceqdist(0.0, 2.0, y_eqdist, t);
    +    printf("%.4f\n", double(v)); // EXPECTED: 6.0
     
    -  The contents of A on exit are illustrated by the following examples
    -  with n = 5:
    +    v = polynomialcalccheb1(-1, +1, y_cheb1, t);
    +    printf("%.4f\n", double(v)); // EXPECTED: 6.0
     
    -  if UPLO = 'U':                       if UPLO = 'L':
    +    v = polynomialcalccheb2(-1, +1, y_cheb2, t);
    +    printf("%.4f\n", double(v)); // EXPECTED: 6.0
    +    return 0;
    +}
     
    -    (  d   e   v1  v2  v3 )              (  d                  )
    -    (      d   e   v2  v3 )              (  e   d              )
    -    (          d   e   v3 )              (  v0  e   d          )
    -    (              d   e  )              (  v0  v1  e   d      )
    -    (                  d  )              (  v0  v1  v2  e   d  )
     
    -where d and e denote diagonal and off-diagonal elements of T, and vi
    -denotes an element of the vector defining H(i).
    +
    +
    + +polynomialsolverreport
    + +polynomialsolve
    + + +
    + +
    +
    /************************************************************************* - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 *************************************************************************/ -
    void alglib::hmatrixtd( - complex_2d_array& a, - ae_int_t n, - bool isupper, - complex_1d_array& tau, - real_1d_array& d, - real_1d_array& e); +
    class polynomialsolverreport +{ + double maxerr; +};
    - +
     
    /************************************************************************* -Unpacking matrix Q which reduces a Hermitian matrix to a real tridiagonal -form. +Polynomial root finding. +This function returns all roots of the polynomial + P(x) = a0 + a1*x + a2*x^2 + ... + an*x^n +Both real and complex roots are returned (see below). -COMMERCIAL EDITION OF ALGLIB: +INPUT PARAMETERS: + A - array[N+1], polynomial coefficients: + * A[0] is constant term + * A[N] is a coefficient of X^N + N - polynomial degree - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! Multithreaded acceleration is NOT supported for this function. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +OUTPUT PARAMETERS: + X - array of complex roots: + * for isolated real root, X[I] is strictly real: IMAGE(X[I])=0 + * complex roots are always returned in pairs - roots occupy + positions I and I+1, with: + * X[I+1]=Conj(X[I]) + * IMAGE(X[I]) > 0 + * IMAGE(X[I+1]) = -IMAGE(X[I]) < 0 + * multiple real roots may have non-zero imaginary part due + to roundoff errors. There is no reliable way to distinguish + real root of multiplicity 2 from two complex roots in + the presence of roundoff errors. + Rep - report, additional information, following fields are set: + * Rep.MaxErr - max( |P(xi)| ) for i=0..N-1. This field + allows to quickly estimate "quality" of the roots being + returned. -Input parameters: - A - the result of a HMatrixTD subroutine - N - size of matrix A. - IsUpper - storage format (a parameter of HMatrixTD subroutine) - Tau - the result of a HMatrixTD subroutine +NOTE: this function uses companion matrix method to find roots. In case + internal EVD solver fails do find eigenvalues, exception is + generated. -Output parameters: - Q - transformation matrix. - array with elements [0..N-1, 0..N-1]. +NOTE: roots are not "polished" and no matrix balancing is performed + for them. -- ALGLIB -- - Copyright 2005-2010 by Bochkanov Sergey + Copyright 24.02.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::hmatrixtdunpackq( - complex_2d_array a, +
    void alglib::polynomialsolve( + real_1d_array a, ae_int_t n, - bool isupper, - complex_1d_array tau, - complex_2d_array& q); + complex_1d_array& x, + polynomialsolverreport& rep, + const xparams _params = alglib::xdefault);
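A minimal root-finding sketch based on the description above, assuming the solvers.h header; the coefficients encode P(x)=x^2-1, whose roots are -1 and +1.

#include <stdio.h>
#include "solvers.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // P(x) = -1 + 0*x + 1*x^2 = x^2 - 1
    real_1d_array a = "[-1,0,1]";
    complex_1d_array x;
    polynomialsolverreport rep;
    polynomialsolve(a, 2, x, rep);

    printf("%s\n", x.tostring(4).c_str());      // two (nearly) real roots near -1 and +1
    printf("%.2e\n", double(rep.maxerr));       // max |P(x_i)|, a quality estimate
    return 0;
}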
    - + +
    + +psi
    + + +
    +
     
    /************************************************************************* -Reduction of a rectangular matrix to bidiagonal form - -The algorithm reduces the rectangular matrix A to bidiagonal form by -orthogonal transformations P and Q: A = Q*B*(P^T). - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Multithreaded acceleration is NOT supported for this function because - ! bidiagonal decompostion is inherently sequential in nature. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. - -Input parameters: - A - source matrix. array[0..M-1, 0..N-1] - M - number of rows in matrix A. - N - number of columns in matrix A. - -Output parameters: - A - matrices Q, B, P in compact form (see below). - TauQ - scalar factors which are used to form matrix Q. - TauP - scalar factors which are used to form matrix P. +Psi (digamma) function -The main diagonal and one of the secondary diagonals of matrix A are -replaced with bidiagonal matrix B. Other elements contain elementary -reflections which form MxM matrix Q and NxN matrix P, respectively. + d - + psi(x) = -- ln | (x) + dx -If M>=N, B is the upper bidiagonal MxN matrix and is stored in the -corresponding elements of matrix A. Matrix Q is represented as a -product of elementary reflections Q = H(0)*H(1)*...*H(n-1), where -H(i) = 1-tau*v*v'. Here tau is a scalar which is stored in TauQ[i], and -vector v has the following structure: v(0:i-1)=0, v(i)=1, v(i+1:m-1) is -stored in elements A(i+1:m-1,i). Matrix P is as follows: P = -G(0)*G(1)*...*G(n-2), where G(i) = 1 - tau*u*u'. Tau is stored in TauP[i], -u(0:i)=0, u(i+1)=1, u(i+2:n-1) is stored in elements A(i,i+2:n-1). +is the logarithmic derivative of the gamma function. +For integer x, + n-1 + - +psi(n) = -EUL + > 1/k. + - + k=1 -If M<N, B is the lower bidiagonal MxN matrix and is stored in the -corresponding elements of matrix A. Q = H(0)*H(1)*...*H(m-2), where -H(i) = 1 - tau*v*v', tau is stored in TauQ, v(0:i)=0, v(i+1)=1, v(i+2:m-1) -is stored in elements A(i+2:m-1,i). P = G(0)*G(1)*...*G(m-1), -G(i) = 1-tau*u*u', tau is stored in TauP, u(0:i-1)=0, u(i)=1, u(i+1:n-1) -is stored in A(i,i+1:n-1). +This formula is used for 0 < n <= 10. If x is negative, it +is transformed to a positive argument by the reflection +formula psi(1-x) = psi(x) + pi cot(pi x). +For general positive x, the argument is made greater than 10 +using the recurrence psi(x+1) = psi(x) + 1/x. +Then the following asymptotic expansion is applied: -EXAMPLE: + inf. B + - 2k +psi(x) = log(x) - 1/2x - > ------- + - 2k + k=1 2k x -m=6, n=5 (m > n): m=5, n=6 (m < n): +where the B2k are Bernoulli numbers. 
-( d e u1 u1 u1 ) ( d u1 u1 u1 u1 u1 ) -( v1 d e u2 u2 ) ( e d u2 u2 u2 u2 ) -( v1 v2 d e u3 ) ( v1 e d u3 u3 u3 ) -( v1 v2 v3 d e ) ( v1 v2 e d u4 u4 ) -( v1 v2 v3 v4 d ) ( v1 v2 v3 e d u5 ) -( v1 v2 v3 v4 v5 ) +ACCURACY: + Relative error (except absolute when |psi| < 1): +arithmetic domain # trials peak rms + IEEE 0,30 30000 1.3e-15 1.4e-16 + IEEE -30,0 40000 1.5e-15 2.2e-16 -Here vi and ui are vectors which form H(i) and G(i), and d and e - -are the diagonal and off-diagonal elements of matrix B. +Cephes Math Library Release 2.8: June, 2000 +Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier +*************************************************************************/ +
    double alglib::psi(double x, const xparams _params = alglib::xdefault); - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994. - Sergey Bochkanov, ALGLIB project, translation from FORTRAN to - pseudocode, 2007-2010. +
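Two quick checks of the digamma routine above (specialfunctions.h assumed): psi(1) equals minus the Euler-Mascheroni constant, and the recurrence psi(x+1)-psi(x)=1/x holds.

#include <stdio.h>
#include "specialfunctions.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // psi(1) = -EUL, approximately -0.57722
    printf("%.5f\n", psi(1.0));

    // Recurrence psi(x+1) = psi(x) + 1/x, checked at x=2.5 (difference close to 0.4)
    double x = 2.5;
    printf("%.6f\n", psi(x + 1) - psi(x));
    return 0;
}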
    + + + +
    +
    /************************************************************************* +Barycentric interpolant. *************************************************************************/ -
    void alglib::rmatrixbd( - real_2d_array& a, - ae_int_t m, - ae_int_t n, - real_1d_array& tauq, - real_1d_array& taup); +
    class barycentricinterpolant +{ +};
    - +
     
    /************************************************************************* -Multiplication by matrix P which reduces matrix A to bidiagonal form. +Rational interpolant without poles -The algorithm allows pre- or post-multiply by P or P'. +The subroutine constructs the rational interpolating function without real +poles (see 'Barycentric rational interpolation with no poles and high +rates of approximation', Michael S. Floater. and Kai Hormann, for more +information on this subject). Input parameters: - QP - matrices Q and P in compact form. - Output of RMatrixBD subroutine. - M - number of rows in matrix A. - N - number of columns in matrix A. - TAUP - scalar factors which are used to form P. - Output of RMatrixBD subroutine. - Z - multiplied matrix. - Array whose indexes range within [0..ZRows-1,0..ZColumns-1]. - ZRows - number of rows in matrix Z. If FromTheRight=False, - ZRows=N, otherwise ZRows can be arbitrary. - ZColumns - number of columns in matrix Z. If FromTheRight=True, - ZColumns=N, otherwise ZColumns can be arbitrary. - FromTheRight - pre- or post-multiply. - DoTranspose - multiply by P or P'. + X - interpolation nodes, array[0..N-1]. + Y - function values, array[0..N-1]. + N - number of nodes, N>0. + D - order of the interpolation scheme, 0 <= D <= N-1. + D<0 will cause an error. + D>=N it will be replaced with D=N-1. + if you don't know what D to choose, use small value about 3-5. Output parameters: - Z - product of Z and P. - Array whose indexes range within [0..ZRows-1,0..ZColumns-1]. - If ZRows=0 or ZColumns=0, the array is not modified. + B - barycentric interpolant. - -- ALGLIB -- - 2005-2010 - Bochkanov Sergey +Note: + this algorithm always succeeds and calculates the weights with close + to machine precision. + + -- ALGLIB PROJECT -- + Copyright 17.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixbdmultiplybyp( - real_2d_array qp, - ae_int_t m, +
    void alglib::barycentricbuildfloaterhormann( + real_1d_array x, + real_1d_array y, ae_int_t n, - real_1d_array taup, - real_2d_array& z, - ae_int_t zrows, - ae_int_t zcolumns, - bool fromtheright, - bool dotranspose); + ae_int_t d, + barycentricinterpolant& b, + const xparams _params = alglib::xdefault);
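An illustrative sketch (assumptions: the header name interpolation.h, and the sample data) which builds a Floater-Hormann interpolant on five nodes and evaluates it with barycentriccalc(), documented further below.

    #include <cstdio>
    #include "interpolation.h"   // assumed location of the barycentric API
    using namespace alglib;

    int main()
    {
        // nodes and values of f(x) = 1/(1+x^2) sampled on [-2,2]
        real_1d_array x = "[-2,-1,0,1,2]";
        real_1d_array y = "[0.2,0.5,1.0,0.5,0.2]";
        ae_int_t n = 5;
        ae_int_t d = 3;                 // small D, as advised above
        barycentricinterpolant b;

        barycentricbuildfloaterhormann(x, y, n, d, b);
        printf("f(0.5) ~ %.6f\n", barycentriccalc(b, 0.5));
        return 0;
    }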
    - +
     
    /************************************************************************* -Multiplication by matrix Q which reduces matrix A to bidiagonal form. - -The algorithm allows pre- or post-multiply by Q or Q'. - -COMMERCIAL EDITION OF ALGLIB: +Rational interpolant from X/Y/W arrays - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Multithreaded acceleration is NOT supported for this function. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +F(t) = SUM(i=0,n-1,w[i]*f[i]/(t-x[i])) / SUM(i=0,n-1,w[i]/(t-x[i])) -Input parameters: - QP - matrices Q and P in compact form. - Output of ToBidiagonal subroutine. - M - number of rows in matrix A. - N - number of columns in matrix A. - TAUQ - scalar factors which are used to form Q. - Output of ToBidiagonal subroutine. - Z - multiplied matrix. - array[0..ZRows-1,0..ZColumns-1] - ZRows - number of rows in matrix Z. If FromTheRight=False, - ZRows=M, otherwise ZRows can be arbitrary. - ZColumns - number of columns in matrix Z. If FromTheRight=True, - ZColumns=M, otherwise ZColumns can be arbitrary. - FromTheRight - pre- or post-multiply. - DoTranspose - multiply by Q or Q'. +INPUT PARAMETERS: + X - interpolation nodes, array[0..N-1] + F - function values, array[0..N-1] + W - barycentric weights, array[0..N-1] + N - nodes count, N>0 -Output parameters: - Z - product of Z and Q. - Array[0..ZRows-1,0..ZColumns-1] - If ZRows=0 or ZColumns=0, the array is not modified. +OUTPUT PARAMETERS: + B - barycentric interpolant built from (X, Y, W) -- ALGLIB -- - 2005-2010 - Bochkanov Sergey + Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixbdmultiplybyq( - real_2d_array qp, - ae_int_t m, +
    void alglib::barycentricbuildxyw( + real_1d_array x, + real_1d_array y, + real_1d_array w, ae_int_t n, - real_1d_array tauq, - real_2d_array& z, - ae_int_t zrows, - ae_int_t zcolumns, - bool fromtheright, - bool dotranspose); + barycentricinterpolant& b, + const xparams _params = alglib::xdefault);
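A sketch of barycentricbuildxyw() with explicit weights (header name assumed). The weights 1, -2, 1 are proportional to the classical polynomial barycentric weights for the nodes 0, 1, 2, so the resulting interpolant reproduces the quadratic through the data, here x^2, exactly.

    #include <cstdio>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    int main()
    {
        real_1d_array x = "[0,1,2]";
        real_1d_array y = "[0,1,4]";     // samples of f(x)=x^2
        real_1d_array w = "[1,-2,1]";    // proportional to polynomial weights
        barycentricinterpolant b;

        barycentricbuildxyw(x, y, w, 3, b);
        printf("value at t=1.5: %.6f (exact 2.25)\n", barycentriccalc(b, 1.5));
        return 0;
    }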
    - +
     
    /************************************************************************* -Unpacking of the main and secondary diagonals of bidiagonal decomposition -of matrix A. +Rational interpolation using barycentric formula + +F(t) = SUM(i=0,n-1,w[i]*f[i]/(t-x[i])) / SUM(i=0,n-1,w[i]/(t-x[i])) Input parameters: - B - output of RMatrixBD subroutine. - M - number of rows in matrix B. - N - number of columns in matrix B. + B - barycentric interpolant built with one of model building + subroutines. + T - interpolation point -Output parameters: - IsUpper - True, if the matrix is upper bidiagonal. - otherwise IsUpper is False. - D - the main diagonal. - Array whose index ranges within [0..Min(M,N)-1]. - E - the secondary diagonal (upper or lower, depending on - the value of IsUpper). - Array index ranges within [0..Min(M,N)-1], the last - element is not used. +Result: + barycentric interpolant F(t) -- ALGLIB -- - 2005-2010 - Bochkanov Sergey + Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixbdunpackdiagonals( - real_2d_array b, - ae_int_t m, - ae_int_t n, - bool& isupper, - real_1d_array& d, - real_1d_array& e); +
    double alglib::barycentriccalc( + barycentricinterpolant b, + double t, + const xparams _params = alglib::xdefault);
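Since the barycentric formula F(t) is written out above, it can be cross-checked against barycentriccalc(). A sketch, with the header name assumed and t chosen away from the nodes so the direct formula is well defined:

    #include <cstdio>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    // direct evaluation of F(t) = SUM(w[i]*y[i]/(t-x[i])) / SUM(w[i]/(t-x[i]))
    static double bary_direct(const real_1d_array &x, const real_1d_array &y,
                              const real_1d_array &w, double t)
    {
        double num = 0.0, den = 0.0;
        for(ae_int_t i = 0; i < x.length(); i++)
        {
            double c = w[i] / (t - x[i]);   // assumes t is not a node
            num += c * y[i];
            den += c;
        }
        return num / den;
    }

    int main()
    {
        real_1d_array x = "[0,1,2]", y = "[0,1,4]", w = "[1,-2,1]";
        barycentricinterpolant b;
        barycentricbuildxyw(x, y, w, 3, b);
        double t = 0.7;
        printf("barycentriccalc: %.12f  direct formula: %.12f\n",
               barycentriccalc(b, t), bary_direct(x, y, w, t));
        return 0;
    }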
    - +
     
    /************************************************************************* -Unpacking matrix P which reduces matrix A to bidiagonal form. -The subroutine returns transposed matrix P. +Differentiation of barycentric interpolant: first derivative. -Input parameters: - QP - matrices Q and P in compact form. - Output of ToBidiagonal subroutine. - M - number of rows in matrix A. - N - number of columns in matrix A. - TAUP - scalar factors which are used to form P. - Output of ToBidiagonal subroutine. - PTRows - required number of rows of matrix P^T. N >= PTRows >= 0. +Algorithm used in this subroutine is very robust and should not fail until +provided with values too close to MaxRealNumber (usually MaxRealNumber/N +or greater will overflow). + +INPUT PARAMETERS: + B - barycentric interpolant built with one of model building + subroutines. + T - interpolation point + +OUTPUT PARAMETERS: + F - barycentric interpolant at T + DF - first derivative + +NOTE -Output parameters: - PT - first PTRows columns of matrix P^T - Array[0..PTRows-1, 0..N-1] - If PTRows=0, the array is not modified. -- ALGLIB -- - 2005-2010 - Bochkanov Sergey + Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixbdunpackpt( - real_2d_array qp, - ae_int_t m, - ae_int_t n, - real_1d_array taup, - ae_int_t ptrows, - real_2d_array& pt); +
    void alglib::barycentricdiff1( + barycentricinterpolant b, + double t, + double& f, + double& df, + const xparams _params = alglib::xdefault);
    - +
     
/*************************************************************************
-Unpacking matrix Q which reduces a matrix to bidiagonal form.
+Differentiation of barycentric interpolant: first/second derivatives.

-COMMERCIAL EDITION OF ALGLIB:
+INPUT PARAMETERS:
+ B - barycentric interpolant built with one of model building
+ subroutines.
+ T - interpolation point

- ! Commercial version of ALGLIB includes one important improvement of
- ! this function, which can be used from C++ and C#:
- ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
- !
- ! Intel MKL gives approximately constant (with respect to number of
- ! worker threads) acceleration factor which depends on CPU being used,
- ! problem size and "baseline" ALGLIB edition which is used for
- ! comparison.
- !
- ! Multithreaded acceleration is NOT supported for this function.
- !
- ! We recommend you to read 'Working with commercial version' section of
- ! ALGLIB Reference Manual in order to find out how to use performance-
- ! related features provided by commercial edition of ALGLIB.
+OUTPUT PARAMETERS:
+ F - barycentric interpolant at T
+ DF - first derivative
+ D2F - second derivative

-Input parameters:
- QP - matrices Q and P in compact form.
- Output of ToBidiagonal subroutine.
- M - number of rows in matrix A.
- N - number of columns in matrix A.
- TAUQ - scalar factors which are used to form Q.
- Output of ToBidiagonal subroutine.
- QColumns - required number of columns in matrix Q.
- M>=QColumns>=0.
+NOTE: this algorithm may fail due to overflow/underflow if used on data
+whose values are close to MaxRealNumber or MinRealNumber. Use more robust
+BarycentricDiff1() subroutine in such cases.

-Output parameters:
- Q - first QColumns columns of matrix Q.
- Array[0..M-1, 0..QColumns-1]
- If QColumns=0, the array is not modified.

 -- ALGLIB --
- 2005-2010
- Bochkanov Sergey
+ Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
    void alglib::rmatrixbdunpackq( - real_2d_array qp, - ae_int_t m, - ae_int_t n, - real_1d_array tauq, - ae_int_t qcolumns, - real_2d_array& q); +
    void alglib::barycentricdiff2( + barycentricinterpolant b, + double t, + double& f, + double& df, + double& d2f, + const xparams _params = alglib::xdefault);
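A sketch of both differentiation calls (header name assumed). The interpolant below is exactly x^2, so the expected derivatives are known in closed form.

    #include <cstdio>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    int main()
    {
        // f(x)=x^2, so f'(t)=2*t and f''(t)=2 everywhere
        real_1d_array x = "[0,1,2]", y = "[0,1,4]", w = "[1,-2,1]";
        barycentricinterpolant b;
        barycentricbuildxyw(x, y, w, 3, b);

        double f, df, d2f;
        barycentricdiff1(b, 0.5, f, df);       // robust: value + first derivative
        printf("f(0.5)=%.6f  f'(0.5)=%.6f (exact 0.25, 1)\n", f, df);
        barycentricdiff2(b, 0.5, f, df, d2f);  // value + first/second derivatives
        printf("f''(0.5)=%.6f (exact 2)\n", d2f);
        return 0;
    }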
    - +
     
    /************************************************************************* -Reduction of a square matrix to upper Hessenberg form: Q'*A*Q = H, -where Q is an orthogonal matrix, H - Hessenberg matrix. +This subroutine performs linear transformation of the argument. -COMMERCIAL EDITION OF ALGLIB: +INPUT PARAMETERS: + B - rational interpolant in barycentric form + CA, CB - transformation coefficients: x = CA*t + CB - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! Multithreaded acceleration is NOT supported for this function. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +OUTPUT PARAMETERS: + B - transformed interpolant with X replaced by T -Input parameters: - A - matrix A with elements [0..N-1, 0..N-1] - N - size of matrix A. + -- ALGLIB PROJECT -- + Copyright 19.08.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::barycentriclintransx( + barycentricinterpolant b, + double ca, + double cb, + const xparams _params = alglib::xdefault); -Output parameters: - A - matrices Q and P in compact form (see below). - Tau - array of scalar factors which are used to form matrix Q. - Array whose index ranges within [0..N-2] +
    + +
    +
    /************************************************************************* +This subroutine performs linear transformation of the barycentric +interpolant. -Matrix H is located on the main diagonal, on the lower secondary diagonal -and above the main diagonal of matrix A. The elements which are used to -form matrix Q are situated in array Tau and below the lower secondary -diagonal of matrix A as follows: +INPUT PARAMETERS: + B - rational interpolant in barycentric form + CA, CB - transformation coefficients: B2(x) = CA*B(x) + CB -Matrix Q is represented as a product of elementary reflections +OUTPUT PARAMETERS: + B - transformed interpolant -Q = H(0)*H(2)*...*H(n-2), + -- ALGLIB PROJECT -- + Copyright 19.08.2009 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::barycentriclintransy( + barycentricinterpolant b, + double ca, + double cb, + const xparams _params = alglib::xdefault); -where each H(i) is given by +
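A sketch of the two linear transforms applied in sequence to an interpolant of f(x)=x^2 (header name assumed). After the argument transform x = 2*t + 1 the model represents (2*t+1)^2; after the value transform it represents 0.5*(2*t+1)^2 - 1.

    #include <cstdio>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    int main()
    {
        // quadratic interpolant f(x)=x^2 on nodes 0,1,2
        real_1d_array x = "[0,1,2]", y = "[0,1,4]", w = "[1,-2,1]";
        barycentricinterpolant b;
        barycentricbuildxyw(x, y, w, 3, b);

        // argument transform x = 2*t + 1: B(t) becomes (2*t+1)^2
        barycentriclintransx(b, 2.0, 1.0);
        printf("after lintransx, B(0.25) = %.6f (expect 2.25)\n", barycentriccalc(b, 0.25));

        // value transform B2(t) = 0.5*B(t) - 1
        barycentriclintransy(b, 0.5, -1.0);
        printf("after lintransy, B(0.25) = %.6f (expect 0.125)\n", barycentriccalc(b, 0.25));
        return 0;
    }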
    + +
    +
    /************************************************************************* +Extracts X/Y/W arrays from rational interpolant -H(i) = 1 - tau * v * (v^T) +INPUT PARAMETERS: + B - barycentric interpolant -where tau is a scalar stored in Tau[I]; v - is a real vector, -so that v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) stored in A(i+2:n-1,i). +OUTPUT PARAMETERS: + N - nodes count, N>0 + X - interpolation nodes, array[0..N-1] + F - function values, array[0..N-1] + W - barycentric weights, array[0..N-1] - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 + -- ALGLIB -- + Copyright 17.08.2009 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixhessenberg( - real_2d_array& a, - ae_int_t n, - real_1d_array& tau); +
    void alglib::barycentricunpack( + barycentricinterpolant b, + ae_int_t& n, + real_1d_array& x, + real_1d_array& y, + real_1d_array& w, + const xparams _params = alglib::xdefault);
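A sketch showing how barycentricunpack() recovers the X/Y/W representation of a previously built interpolant (header name assumed).

    #include <cstdio>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    int main()
    {
        // build any interpolant, then inspect its barycentric representation
        real_1d_array x = "[-1,0,1,2]", y = "[1,0,1,4]";
        barycentricinterpolant b;
        barycentricbuildfloaterhormann(x, y, 4, 3, b);

        ae_int_t n;
        real_1d_array xu, yu, wu;
        barycentricunpack(b, n, xu, yu, wu);
        for(int i = 0; i < n; i++)
            printf("node %d: x=%.3f  y=%.3f  w=%.6f\n", i, xu[i], yu[i], wu[i]);
        return 0;
    }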
    - + + +
     
/*************************************************************************
-Unpacking matrix H (the result of matrix A reduction to upper Hessenberg form)
+Buffer object which is used to perform parallel RBF model evaluations in
+the multithreaded mode (multiple threads working with same RBF model object).

-Input parameters:
- A - output of RMatrixHessenberg subroutine.
- N - size of matrix A.
+This object should be created with rbfcreatecalcbuffer().
+*************************************************************************/
    class rbfcalcbuffer +{ +}; -Output parameters: - H - matrix H. Array whose indexes range within [0..N-1, 0..N-1]. +
    + +
    +
    /************************************************************************* +RBF model. + +Never try to directly work with fields of this object - always use ALGLIB +functions to use this object. +*************************************************************************/ +
    class rbfmodel +{ +}; + +
    + +
    +
    /************************************************************************* +RBF solution report: +* TerminationType - termination type, positive values - success, + non-positive - failure. + +Fields which are set by modern RBF solvers (hierarchical): +* RMSError - root-mean-square error; NAN for old solvers (ML, QNN) +* MaxError - maximum error; NAN for old solvers (ML, QNN) +*************************************************************************/ +
    class rbfreport +{ + double rmserror; + double maxerror; + ae_int_t arows; + ae_int_t acols; + ae_int_t annz; + ae_int_t iterationscount; + ae_int_t nmv; + ae_int_t terminationtype; +}; + +
    + +
    +
    /************************************************************************* +This function builds RBF model and returns report (contains some +information which can be used for evaluation of the algorithm properties). + +Call to this function modifies RBF model by calculating its centers/radii/ +weights and saving them into RBFModel structure. Initially RBFModel +contain zero coefficients, but after call to this function we will have +coefficients which were calculated in order to fit our dataset. + +After you called this function you can call RBFCalc(), RBFGridCalc() and +other model calculation functions. + +INPUT PARAMETERS: + S - RBF model, initialized by RBFCreate() call + Rep - report: + * Rep.TerminationType: + * -5 - non-distinct basis function centers were detected, + interpolation aborted; only QNN returns this + error code, other algorithms can handle non- + distinct nodes. + * -4 - nonconvergence of the internal SVD solver + * -3 incorrect model construction algorithm was chosen: + QNN or RBF-ML, combined with one of the incompatible + features - NX=1 or NX>3; points with per-dimension + scales. + * 1 - successful termination + * 8 - a termination request was submitted via + rbfrequesttermination() function. + + Fields which are set only by modern RBF solvers (hierarchical + or nonnegative; older solvers like QNN and ML initialize these + fields by NANs): + * rep.rmserror - root-mean-square error at nodes + * rep.maxerror - maximum error at nodes + + Fields are used for debugging purposes: + * Rep.IterationsCount - iterations count of the LSQR solver + * Rep.NMV - number of matrix-vector products + * Rep.ARows - rows count for the system matrix + * Rep.ACols - columns count for the system matrix + * Rep.ANNZ - number of significantly non-zero elements + (elements above some algorithm-determined threshold) + +NOTE: failure to build model will leave current state of the structure +unchanged. -- ALGLIB -- - 2005-2010 - Bochkanov Sergey + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixhessenbergunpackh( - real_2d_array a, - ae_int_t n, - real_2d_array& h); +
    void alglib::rbfbuildmodel( + rbfmodel s, + rbfreport& rep, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  [3]  

    +
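A sketch of a build followed by inspection of the report fields listed above. rbfsetpoints() and rbfsetalgohierarchical() are referenced by this manual, but the exact overloads used here, as well as the header name, are assumptions.

    #include <cstdio>
    #include "interpolation.h"   // assumed: RBF API ships in this header
    using namespace alglib;

    int main()
    {
        // 2-dimensional arguments, scalar values: columns are (x0, x1, f)
        rbfmodel model;
        rbfcreate(2, 1, model);
        real_2d_array xy = "[[0,0,1],[1,0,2],[0,1,3],[1,1,4]]";
        rbfsetpoints(model, xy);                      // assumed overload without explicit npoints
        rbfsetalgohierarchical(model, 1.0, 3, 0.0);   // assumed signature: rbase, nlayers, lambda

        rbfreport rep;
        rbfbuildmodel(model, rep);
        if( rep.terminationtype > 0 )
            printf("build OK, rms=%.3e, max=%.3e\n", rep.rmserror, rep.maxerror);
        else
            printf("build failed, terminationtype=%d\n", (int)rep.terminationtype);
        return 0;
    }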
     
    /************************************************************************* -Unpacking matrix Q which reduces matrix A to upper Hessenberg form +This function calculates values of the RBF model at the given point. -COMMERCIAL EDITION OF ALGLIB: +This is general function which can be used for arbitrary NX (dimension of +the space of arguments) and NY (dimension of the function itself). However +when you have NY=1 you may find more convenient to use rbfcalc2() or +rbfcalc3(). - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! Multithreaded acceleration is NOT supported for this function. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +If you want to perform parallel model evaluation from multiple threads, +use rbftscalcbuf() with per-thread buffer object. -Input parameters: - A - output of RMatrixHessenberg subroutine. - N - size of matrix A. - Tau - scalar factors which are used to form Q. - Output of RMatrixHessenberg subroutine. +This function returns 0.0 when model is not initialized. -Output parameters: - Q - matrix Q. - Array whose indexes range within [0..N-1, 0..N-1]. +INPUT PARAMETERS: + S - RBF model + X - coordinates, array[NX]. + X may have more than NX elements, in this case only + leading NX will be used. + +OUTPUT PARAMETERS: + Y - function value, array[NY]. Y is out-parameter and + reallocated after call to this function. In case you want + to reuse previously allocated Y, you may use RBFCalcBuf(), + which reallocates Y only when it is too small. -- ALGLIB -- - 2005-2010 - Bochkanov Sergey + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixhessenbergunpackq( - real_2d_array a, - ae_int_t n, - real_1d_array tau, - real_2d_array& q); +
    void alglib::rbfcalc( + rbfmodel s, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
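A sketch of a vector-valued (NY=2) evaluation with rbfcalc(), following the declaration above. The dataset layout (NX+NY columns per point) and the builder calls marked "assumed" in the comments are assumptions.

    #include <cstdio>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    int main()
    {
        // NX=2 arguments, NY=2 outputs: columns are (x0, x1, f0, f1)
        rbfmodel model;
        rbfcreate(2, 2, model);
        real_2d_array xy = "[[0,0,1,-1],[1,0,2,-2],[0,1,3,-3],[1,1,4,-4]]";
        rbfsetpoints(model, xy);                      // assumed overload
        rbfsetalgohierarchical(model, 1.0, 3, 0.0);   // assumed signature
        rbfreport rep;
        rbfbuildmodel(model, rep);

        real_1d_array x = "[0.5,0.5]", y;
        rbfcalc(model, x, y);                         // y is reallocated to length NY=2
        printf("f0=%.4f  f1=%.4f\n", y[0], y[1]);
        return 0;
    }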
     
/*************************************************************************
-LQ decomposition of a rectangular matrix of size MxN
+This function calculates the value of the RBF model at the given point.

-COMMERCIAL EDITION OF ALGLIB:
+IMPORTANT: this function works only with modern (hierarchical) RBFs. It
+ can not be used with legacy (version 1) RBFs because older RBF
+ code does not support 1-dimensional models.
+
+This function should be used when we have NY=1 (scalar function) and NX=1
+(1-dimensional space). If you have 3-dimensional space, use rbfcalc3(). If
+you have 2-dimensional space, use rbfcalc2(). If you have general
+situation (NX-dimensional space, NY-dimensional function) you should use
+generic rbfcalc().

- ! Commercial version of ALGLIB includes two important improvements of
- ! this function, which can be used from C++ and C#:
- ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB)
- ! * multicore support
- !
- ! Intel MKL gives approximately constant (with respect to number of
- ! worker threads) acceleration factor which depends on CPU being used,
- ! problem size and "baseline" ALGLIB edition which is used for
- ! comparison.
- !
- ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be:
- ! * about 2-3x faster than ALGLIB for C++ without MKL
- ! * about 7-10x faster than "pure C#" edition of ALGLIB
- ! Difference in performance will be more striking on newer CPU's with
- ! support for newer SIMD instructions. Generally, MKL accelerates any
- ! problem whose size is at least 128, with best efficiency achieved for
- ! N's larger than 512.
- !
- ! Commercial edition of ALGLIB also supports multithreaded acceleration
- ! of this function. We should note that QP decomposition is harder to
- ! parallelize than, say, matrix-matrix product - this algorithm has
- ! many internal synchronization points which can not be avoided. However
- ! parallelism starts to be profitable starting from N=512, achieving
- ! near-linear speedup for N=4096 or higher.
- !
- ! In order to use multicore features you have to:
- ! * use commercial version of ALGLIB
- ! * call this function with "smp_" prefix, which indicates that
- ! multicore code will be used (for multicore support)
- !
- ! We recommend you to read 'Working with commercial version' section of
- ! ALGLIB Reference Manual in order to find out how to use performance-
- ! related features provided by commercial edition of ALGLIB.
+If you want to perform parallel model evaluation from multiple threads,
+use rbftscalcbuf() with per-thread buffer object.

-Input parameters:
- A - matrix A whose indexes range within [0..M-1, 0..N-1].
- M - number of rows in matrix A.
- N - number of columns in matrix A.
+This function returns 0.0 when:
+* model is not initialized
+* NX<>1
+* NY<>1

-Output parameters:
- A - matrices L and Q in compact form (see below)
- Tau - array of scalar factors which are used to form
- matrix Q. Array whose index ranges within [0..Min(M,N)-1].
+INPUT PARAMETERS:
+ S - RBF model
+ X0 - X-coordinate, finite number

-Matrix A is represented as A = LQ, where Q is an orthogonal matrix of size
-MxM, L - lower triangular (or lower trapezoid) matrix of size M x N.
+RESULT:
+ value of the model or 0.0 (as defined above)

-The elements of matrix L are located on and below the main diagonal of
-matrix A.
The elements which are located in Tau array and above the main -diagonal of matrix A are used to form matrix Q as follows: + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::rbfcalc1( + rbfmodel s, + double x0, + const xparams _params = alglib::xdefault); -Matrix Q is represented as a product of elementary reflections +
    + +
    +
    /************************************************************************* +This function calculates values of the RBF model in the given point. -Q = H(k-1)*H(k-2)*...*H(1)*H(0), +This function should be used when we have NY=1 (scalar function) and NX=2 +(2-dimensional space). If you have 3-dimensional space, use rbfcalc3(). If +you have general situation (NX-dimensional space, NY-dimensional function) +you should use generic rbfcalc(). -where k = min(m,n), and each H(i) is of the form +If you want to calculate function values many times, consider using +rbfgridcalc2v(), which is far more efficient than many subsequent calls to +rbfcalc2(). -H(i) = 1 - tau * v * (v^T) +If you want to perform parallel model evaluation from multiple threads, +use rbftscalcbuf() with per-thread buffer object. -where tau is a scalar stored in Tau[I]; v - real vector, so that v(0:i-1)=0, -v(i) = 1, v(i+1:n-1) stored in A(i,i+1:n-1). +This function returns 0.0 when: +* model is not initialized +* NX<>2 + *NY<>1 - -- ALGLIB routine -- - 17.02.2010 - Bochkanov Sergey +INPUT PARAMETERS: + S - RBF model + X0 - first coordinate, finite number + X1 - second coordinate, finite number + +RESULT: + value of the model or 0.0 (as defined above) + + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixlq( - real_2d_array& a, - ae_int_t m, - ae_int_t n, - real_1d_array& tau); -void alglib::smp_rmatrixlq( - real_2d_array& a, - ae_int_t m, - ae_int_t n, - real_1d_array& tau); +
    double alglib::rbfcalc2( + rbfmodel s, + double x0, + double x1, + const xparams _params = alglib::xdefault);
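A sketch of scalar 2D evaluation with rbfcalc2(); the builder calls marked "assumed" in the comments (and the header name) are assumptions, the rbfcalc2() call follows the declaration above.

    #include <cstdio>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    int main()
    {
        rbfmodel model;
        rbfcreate(2, 1, model);                       // NX=2, NY=1: rbfcalc2() applies
        real_2d_array xy = "[[0,0,1],[1,0,2],[0,1,3],[1,1,4]]";
        rbfsetpoints(model, xy);                      // assumed overload
        rbfsetalgohierarchical(model, 1.0, 3, 0.0);   // assumed signature
        rbfreport rep;
        rbfbuildmodel(model, rep);

        // scalar 2D evaluation; returns 0.0 for an empty model or NX/NY mismatch
        printf("f(0.5,0.5) = %.4f\n", rbfcalc2(model, 0.5, 0.5));
        return 0;
    }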
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Unpacking of matrix L from the LQ decomposition of a matrix A +This function calculates value of the RBF model in the given point. -Input parameters: - A - matrices Q and L in compact form. - Output of RMatrixLQ subroutine. - M - number of rows in given matrix A. M>=0. - N - number of columns in given matrix A. N>=0. +This function should be used when we have NY=1 (scalar function) and NX=3 +(3-dimensional space). If you have 2-dimensional space, use rbfcalc2(). If +you have general situation (NX-dimensional space, NY-dimensional function) +you should use generic rbfcalc(). -Output parameters: - L - matrix L, array[0..M-1, 0..N-1]. +If you want to calculate function values many times, consider using +rbfgridcalc3v(), which is far more efficient than many subsequent calls to +rbfcalc3(). - -- ALGLIB routine -- - 17.02.2010 - Bochkanov Sergey +If you want to perform parallel model evaluation from multiple threads, +use rbftscalcbuf() with per-thread buffer object. + +This function returns 0.0 when: +* model is not initialized +* NX<>3 + *NY<>1 + +INPUT PARAMETERS: + S - RBF model + X0 - first coordinate, finite number + X1 - second coordinate, finite number + X2 - third coordinate, finite number + +RESULT: + value of the model or 0.0 (as defined above) + + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixlqunpackl( - real_2d_array a, - ae_int_t m, - ae_int_t n, - real_2d_array& l); +
    double alglib::rbfcalc3( + rbfmodel s, + double x0, + double x1, + double x2, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Partial unpacking of matrix Q from the LQ decomposition of a matrix A +This function calculates values of the RBF model at the given point. -COMMERCIAL EDITION OF ALGLIB: +Same as rbfcalc(), but does not reallocate Y when in is large enough to +store function values. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that QP decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=512, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +If you want to perform parallel model evaluation from multiple threads, +use rbftscalcbuf() with per-thread buffer object. -Input parameters: - A - matrices L and Q in compact form. - Output of RMatrixLQ subroutine. - M - number of rows in given matrix A. M>=0. - N - number of columns in given matrix A. N>=0. - Tau - scalar factors which are used to form Q. - Output of the RMatrixLQ subroutine. - QRows - required number of rows in matrix Q. N>=QRows>=0. +INPUT PARAMETERS: + S - RBF model + X - coordinates, array[NX]. + X may have more than NX elements, in this case only + leading NX will be used. + Y - possibly preallocated array -Output parameters: - Q - first QRows rows of matrix Q. Array whose indexes range - within [0..QRows-1, 0..N-1]. If QRows=0, the array remains - unchanged. +OUTPUT PARAMETERS: + Y - function value, array[NY]. Y is not reallocated when it + is larger than NY. - -- ALGLIB routine -- - 17.02.2010 - Bochkanov Sergey + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixlqunpackq( - real_2d_array a, - ae_int_t m, - ae_int_t n, - real_1d_array tau, - ae_int_t qrows, - real_2d_array& q); -void alglib::smp_rmatrixlqunpackq( - real_2d_array a, - ae_int_t m, - ae_int_t n, - real_1d_array tau, - ae_int_t qrows, - real_2d_array& q); +
    void alglib::rbfcalcbuf( + rbfmodel s, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -QR decomposition of a rectangular matrix of size MxN - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that QP decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=512, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +This function creates RBF model for a scalar (NY=1) or vector (NY>1) +function in a NX-dimensional space (NX>=1). -Input parameters: - A - matrix A whose indexes range within [0..M-1, 0..N-1]. - M - number of rows in matrix A. - N - number of columns in matrix A. +Newly created model is empty. It can be used for interpolation right after +creation, but it just returns zeros. You have to add points to the model, +tune interpolation settings, and then call model construction function +rbfbuildmodel() which will update model according to your specification. -Output parameters: - A - matrices Q and R in compact form (see below). - Tau - array of scalar factors which are used to form - matrix Q. Array whose index ranges within [0.. Min(M-1,N-1)]. +USAGE: +1. User creates model with rbfcreate() +2. User adds dataset with rbfsetpoints() (points do NOT have to be on a + regular grid) or rbfsetpointsandscales(). +3. (OPTIONAL) User chooses polynomial term by calling: + * rbflinterm() to set linear term + * rbfconstterm() to set constant term + * rbfzeroterm() to set zero term + By default, linear term is used. +4. User tweaks algorithm properties with rbfsetalgohierarchical() method + (or chooses one of the legacy algorithms - QNN (rbfsetalgoqnn) or ML + (rbfsetalgomultilayer)). +5. User calls rbfbuildmodel() function which rebuilds model according to + the specification +6. User may call rbfcalc() to calculate model value at the specified point, + rbfgridcalc() to calculate model values at the points of the regular + grid. User may extract model coefficients with rbfunpack() call. 
+ +IMPORTANT: we recommend you to use latest model construction algorithm - + hierarchical RBFs, which is activated by rbfsetalgohierarchical() + function. This algorithm is the fastest one, and most memory- + efficient. + However, it is incompatible with older versions of ALGLIB + (pre-3.11). So, if you serialize hierarchical model, you will + be unable to load it in pre-3.11 ALGLIB. Other model types (QNN + and RBF-ML) are still backward-compatible. -Matrix A is represented as A = QR, where Q is an orthogonal matrix of size -MxM, R - upper triangular (or upper trapezoid) matrix of size M x N. +INPUT PARAMETERS: + NX - dimension of the space, NX>=1 + NY - function dimension, NY>=1 -The elements of matrix R are located on and above the main diagonal of -matrix A. The elements which are located in Tau array and below the main -diagonal of matrix A are used to form matrix Q as follows: +OUTPUT PARAMETERS: + S - RBF model (initially equals to zero) -Matrix Q is represented as a product of elementary reflections +NOTE 1: memory requirements. RBF models require amount of memory which is + proportional to the number of data points. Some additional memory + is allocated during model construction, but most of this memory is + freed after model coefficients are calculated. Amount of this + additional memory depends on model construction algorithm being + used. -Q = H(0)*H(2)*...*H(k-1), +NOTE 2: prior to ALGLIB version 3.11, RBF models supported only NX=2 or + NX=3. Any attempt to create single-dimensional or more than + 3-dimensional RBF model resulted in exception. -where k = min(m,n), and each H(i) is in the form + ALGLIB 3.11 supports any NX>0, but models created with NX!=2 and + NX!=3 are incompatible with (a) older versions of ALGLIB, (b) old + model construction algorithms (QNN or RBF-ML). -H(i) = 1 - tau * v * (v^T) + So, if you create a model with NX=2 or NX=3, then, depending on + specific model construction algorithm being chosen, you will (QNN + and RBF-ML) or will not (HierarchicalRBF) get backward compatibility + with older versions of ALGLIB. You have a choice here. -where tau is a scalar stored in Tau[I]; v - real vector, -so that v(0:i-1) = 0, v(i) = 1, v(i+1:m-1) stored in A(i+1:m-1,i). + However, if you create a model with NX neither 2 nor 3, you have + no backward compatibility from the start, and you are forced to + use hierarchical RBFs and ALGLIB 3.11 or later. - -- ALGLIB routine -- - 17.02.2010 - Bochkanov Sergey + -- ALGLIB -- + Copyright 13.12.2011, 20.06.2016 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixqr( - real_2d_array& a, - ae_int_t m, - ae_int_t n, - real_1d_array& tau); -void alglib::smp_rmatrixqr( - real_2d_array& a, - ae_int_t m, - ae_int_t n, - real_1d_array& tau); +
    void alglib::rbfcreate( + ae_int_t nx, + ae_int_t ny, + rbfmodel& s, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  [3]  [4]  

    +
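A compact walk through the USAGE list above (create, add points, choose the hierarchical algorithm, build, evaluate). The rbfsetpoints()/rbfsetalgohierarchical() signatures and the header name are assumptions; the "returns zeros before the build" behaviour is taken from the note above.

    #include <cstdio>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    int main()
    {
        // steps 1-2: create an empty model and attach the dataset
        rbfmodel model;
        rbfcreate(3, 1, model);                       // NX=3, NY=1
        printf("before build: f(0,0,0) = %.1f\n",
               rbfcalc3(model, 0.0, 0.0, 0.0));       // empty model -> 0.0

        real_2d_array xy = "[[0,0,0,1],[1,0,0,2],[0,1,0,3],[0,0,1,4]]";
        rbfsetpoints(model, xy);                      // assumed overload without npoints
        rbfsetalgohierarchical(model, 1.0, 3, 0.0);   // step 4, assumed signature

        rbfreport rep;                                // step 5
        rbfbuildmodel(model, rep);
        printf("after build:  f(0,0,0) = %.4f\n",
               rbfcalc3(model, 0.0, 0.0, 0.0));       // step 6
        return 0;
    }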
     
    /************************************************************************* -Partial unpacking of matrix Q from the QR decomposition of a matrix A +This function creates buffer structure which can be used to perform +parallel RBF model evaluations (with one RBF model instance being +used from multiple threads, as long as different threads use different +instances of buffer). -COMMERCIAL EDITION OF ALGLIB: +This buffer object can be used with rbftscalcbuf() function (here "ts" +stands for "thread-safe", "buf" is a suffix which denotes function which +reuses previously allocated output space). - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that QP decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=512, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +How to use it: +* create RBF model structure with rbfcreate() +* load data, tune parameters +* call rbfbuildmodel() +* call rbfcreatecalcbuffer(), once per thread working with RBF model (you + should call this function only AFTER call to rbfbuildmodel(), see below + for more information) +* call rbftscalcbuf() from different threads, with each thread working + with its own copy of buffer object. -Input parameters: - A - matrices Q and R in compact form. - Output of RMatrixQR subroutine. - M - number of rows in given matrix A. M>=0. - N - number of columns in given matrix A. N>=0. - Tau - scalar factors which are used to form Q. - Output of the RMatrixQR subroutine. - QColumns - required number of columns of matrix Q. M>=QColumns>=0. +INPUT PARAMETERS + S - RBF model -Output parameters: - Q - first QColumns columns of matrix Q. - Array whose indexes range within [0..M-1, 0..QColumns-1]. - If QColumns=0, the array remains unchanged. +OUTPUT PARAMETERS + Buf - external buffer. - -- ALGLIB routine -- - 17.02.2010 - Bochkanov Sergey + +IMPORTANT: buffer object should be used only with RBF model object which + was used to initialize buffer. 
Any attempt to use buffer with + different object is dangerous - you may get memory violation + error because sizes of internal arrays do not fit to dimensions + of RBF structure. + +IMPORTANT: you should call this function only for model which was built + with rbfbuildmodel() function, after successful invocation of + rbfbuildmodel(). Sizes of some internal structures are + determined only after model is built, so buffer object created + before model construction stage will be useless (and any + attempt to use it will result in exception). + + -- ALGLIB -- + Copyright 02.04.2016 by Sergey Bochkanov *************************************************************************/ -
    void alglib::rmatrixqrunpackq( - real_2d_array a, - ae_int_t m, - ae_int_t n, - real_1d_array tau, - ae_int_t qcolumns, - real_2d_array& q); -void alglib::smp_rmatrixqrunpackq( - real_2d_array a, - ae_int_t m, - ae_int_t n, - real_1d_array tau, - ae_int_t qcolumns, - real_2d_array& q); +
    void alglib::rbfcreatecalcbuffer( + rbfmodel s, + rbfcalcbuffer& buf, + const xparams _params = alglib::xdefault);
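A sketch of per-thread buffers. rbftscalcbuf() is referenced by this manual, but its exact argument order (model, buffer, x, y) is an assumption, as are the builder calls and the header name.

    #include <cstdio>
    #include <thread>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    int main()
    {
        rbfmodel model;
        rbfcreate(2, 1, model);
        real_2d_array xy = "[[0,0,1],[1,0,2],[0,1,3],[1,1,4]]";
        rbfsetpoints(model, xy);                      // assumed overload
        rbfsetalgohierarchical(model, 1.0, 3, 0.0);   // assumed signature
        rbfreport rep;
        rbfbuildmodel(model, rep);                    // buffers must be created AFTER the build

        // one buffer per worker thread, all sharing the same (read-only) model
        auto worker = [&model](double x0, double x1) {
            rbfcalcbuffer buf;
            rbfcreatecalcbuffer(model, buf);
            real_1d_array x, y;
            x.setlength(2);
            x[0] = x0; x[1] = x1;
            rbftscalcbuf(model, buf, x, y);           // assumed signature
            printf("f(%.2f,%.2f) = %.4f\n", x0, x1, y[0]);
        };
        std::thread t1(worker, 0.25, 0.25), t2(worker, 0.75, 0.75);
        t1.join(); t2.join();
        return 0;
    }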
    - +
     
    /************************************************************************* -Unpacking of matrix R from the QR decomposition of a matrix A +This function returns model version. -Input parameters: - A - matrices Q and R in compact form. - Output of RMatrixQR subroutine. - M - number of rows in given matrix A. M>=0. - N - number of columns in given matrix A. N>=0. +INPUT PARAMETERS: + S - RBF model -Output parameters: - R - matrix R, array[0..M-1, 0..N-1]. +RESULT: + * 1 - for models created by QNN and RBF-ML algorithms, + compatible with ALGLIB 3.10 or earlier. + * 2 - for models created by HierarchicalRBF, requires + ALGLIB 3.11 or later - -- ALGLIB routine -- - 17.02.2010 - Bochkanov Sergey + -- ALGLIB -- + Copyright 06.07.2016 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rmatrixqrunpackr( - real_2d_array a, - ae_int_t m, - ae_int_t n, - real_2d_array& r); +
    ae_int_t alglib::rbfgetmodelversion( + rbfmodel s, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Reduction of a symmetric matrix which is given by its higher or lower -triangular part to a tridiagonal matrix using orthogonal similarity -transformation: Q'*A*Q=T. +This is legacy function for gridded calculation of RBF model. -COMMERCIAL EDITION OF ALGLIB: +It is superseded by rbfgridcalc2v() and rbfgridcalc2vsubset() functions. - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::rbfgridcalc2( + rbfmodel s, + real_1d_array x0, + ae_int_t n0, + real_1d_array x1, + ae_int_t n1, + real_2d_array& y, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function calculates values of the RBF model at the regular grid, +which has N0*N1 points, with Point[I,J] = (X0[I], X1[J]). Vector-valued +RBF models are supported. + +This function returns 0.0 when: +* model is not initialized +* NX<>2 + + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Multithreaded acceleration is NOT supported for this function. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. -Input parameters: - A - matrix to be transformed - array with elements [0..N-1, 0..N-1]. - N - size of matrix A. - IsUpper - storage format. If IsUpper = True, then matrix A is given - by its upper triangle, and the lower triangle is not used - and not modified by the algorithm, and vice versa - if IsUpper = False. - -Output parameters: - A - matrices T and Q in compact form (see lower) - Tau - array of factors which are forming matrices H(i) - array with elements [0..N-2]. - D - main diagonal of symmetric matrix T. - array with elements [0..N-1]. - E - secondary diagonal of symmetric matrix T. - array with elements [0..N-2]. - - - If IsUpper=True, the matrix Q is represented as a product of elementary - reflectors - - Q = H(n-2) . . . H(2) H(0). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(i+1:n-1) = 0, v(i) = 1, v(0:i-1) is stored on exit in - A(0:i-1,i+1), and tau in TAU(i). - - If IsUpper=False, the matrix Q is represented as a product of elementary - reflectors - - Q = H(0) H(2) . . . H(n-2). - - Each H(i) has the form - - H(i) = I - tau * v * v' - - where tau is a real scalar, and v is a real vector with - v(0:i) = 0, v(i+1) = 1, v(i+2:n-1) is stored on exit in A(i+2:n-1,i), - and tau in TAU(i). +NOTE: Parallel processing is implemented only for modern (hierarchical) + RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still + processed serially. - The contents of A on exit are illustrated by the following examples - with n = 5: +INPUT PARAMETERS: + S - RBF model, used in read-only mode, can be shared between + multiple invocations of this function from multiple + threads. - if UPLO = 'U': if UPLO = 'L': + X0 - array of grid nodes, first coordinates, array[N0]. + Must be ordered by ascending. Exception is generated + if the array is not correctly ordered. + N0 - grid size (number of nodes) in the first dimension - ( d e v1 v2 v3 ) ( d ) - ( d e v2 v3 ) ( e d ) - ( d e v3 ) ( v0 e d ) - ( d e ) ( v0 v1 e d ) - ( d ) ( v0 v1 v2 e d ) + X1 - array of grid nodes, second coordinates, array[N1] + Must be ordered by ascending. Exception is generated + if the array is not correctly ordered. + N1 - grid size (number of nodes) in the second dimension - where d and e denote diagonal and off-diagonal elements of T, and vi - denotes an element of the vector defining H(i). +OUTPUT PARAMETERS: + Y - function values, array[NY*N0*N1], where NY is a number of + "output" vector values (this function supports vector- + valued RBF models). Y is out-variable and is reallocated + by this function. 
+ Y[K+NY*(I0+I1*N0)]=F_k(X0[I0],X1[I1]), for: + * K=0...NY-1 + * I0=0...N0-1 + * I1=0...N1-1 + +NOTE: this function supports weakly ordered grid nodes, i.e. you may have + X[i]=X[i+1] for some i. It does not provide you any performance + benefits due to duplication of points, just convenience and + flexibility. + +NOTE: this function is re-entrant, i.e. you may use same rbfmodel + structure in multiple threads calling this function for different + grids. + +NOTE: if you need function values on some subset of regular grid, which + may be described as "several compact and dense islands", you may + use rbfgridcalc2vsubset(). - -- LAPACK routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - October 31, 1992 + -- ALGLIB -- + Copyright 27.01.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::smatrixtd( - real_2d_array& a, - ae_int_t n, - bool isupper, - real_1d_array& tau, - real_1d_array& d, - real_1d_array& e); +
    void alglib::rbfgridcalc2v( + rbfmodel s, + real_1d_array x0, + ae_int_t n0, + real_1d_array x1, + ae_int_t n1, + real_1d_array& y, + const xparams _params = alglib::xdefault);
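A sketch showing the Y[K+NY*(I0+I1*N0)] layout described above for NY=1. Builder calls marked "assumed" in the comments (and the header name) are assumptions.

    #include <cstdio>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    int main()
    {
        rbfmodel model;
        rbfcreate(2, 1, model);                       // NY=1, so Y[i0+i1*n0] is the layout
        real_2d_array xy = "[[0,0,1],[1,0,2],[0,1,3],[1,1,4]]";
        rbfsetpoints(model, xy);                      // assumed overload
        rbfsetalgohierarchical(model, 1.0, 3, 0.0);   // assumed signature
        rbfreport rep;
        rbfbuildmodel(model, rep);

        // 3x2 grid; both node arrays must be sorted in ascending order
        real_1d_array x0 = "[0.0,0.5,1.0]", x1 = "[0.0,1.0]", y;
        rbfgridcalc2v(model, x0, 3, x1, 2, y);
        for(int i1 = 0; i1 < 2; i1++)
            for(int i0 = 0; i0 < 3; i0++)             // Y[K+NY*(I0+I1*N0)] with K=0, NY=1
                printf("f(%.1f,%.1f) = %.4f\n", x0[i0], x1[i1], y[i0 + i1*3]);
        return 0;
    }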
    - +
     
    /************************************************************************* -Unpacking matrix Q which reduces symmetric matrix to a tridiagonal -form. - +This function calculates values of the RBF model at some subset of regular +grid: +* grid has N0*N1 points, with Point[I,J] = (X0[I], X1[J]) +* only values at some subset of this grid are required +Vector-valued RBF models are supported. -COMMERCIAL EDITION OF ALGLIB: +This function returns 0.0 when: +* model is not initialized +* NX<>2 - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Multithreaded acceleration is NOT supported for this function. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. -Input parameters: - A - the result of a SMatrixTD subroutine - N - size of matrix A. - IsUpper - storage format (a parameter of SMatrixTD subroutine) - Tau - the result of a SMatrixTD subroutine +NOTE: Parallel processing is implemented only for modern (hierarchical) + RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still + processed serially. -Output parameters: - Q - transformation matrix. - array with elements [0..N-1, 0..N-1]. +INPUT PARAMETERS: + S - RBF model, used in read-only mode, can be shared between + multiple invocations of this function from multiple + threads. - -- ALGLIB -- - Copyright 2005-2010 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::smatrixtdunpackq( - real_2d_array a, - ae_int_t n, - bool isupper, - real_1d_array tau, - real_2d_array& q); + X0 - array of grid nodes, first coordinates, array[N0]. + Must be ordered by ascending. Exception is generated + if the array is not correctly ordered. + N0 - grid size (number of nodes) in the first dimension -
    - - - -
    -
    /************************************************************************* -Parametric spline inteprolant: 2-dimensional curve. + X1 - array of grid nodes, second coordinates, array[N1] + Must be ordered by ascending. Exception is generated + if the array is not correctly ordered. + N1 - grid size (number of nodes) in the second dimension -You should not try to access its members directly - use PSpline2XXXXXXXX() -functions instead. + FlagY - array[N0*N1]: + * Y[I0+I1*N0] corresponds to node (X0[I0],X1[I1]) + * it is a "bitmap" array which contains False for nodes + which are NOT calculated, and True for nodes which are + required. + +OUTPUT PARAMETERS: + Y - function values, array[NY*N0*N1*N2], where NY is a number + of "output" vector values (this function supports vector- + valued RBF models): + * Y[K+NY*(I0+I1*N0)]=F_k(X0[I0],X1[I1]), + for K=0...NY-1, I0=0...N0-1, I1=0...N1-1. + * elements of Y[] which correspond to FlagY[]=True are + loaded by model values (which may be exactly zero for + some nodes). + * elements of Y[] which correspond to FlagY[]=False MAY be + initialized by zeros OR may be calculated. This function + processes grid as a hierarchy of nested blocks and + micro-rows. If just one element of micro-row is required, + entire micro-row (up to 8 nodes in the current version, + but no promises) is calculated. + +NOTE: this function supports weakly ordered grid nodes, i.e. you may have + X[i]=X[i+1] for some i. It does not provide you any performance + benefits due to duplication of points, just convenience and + flexibility. + +NOTE: this function is re-entrant, i.e. you may use same rbfmodel + structure in multiple threads calling this function for different + grids. + + -- ALGLIB -- + Copyright 04.03.2016 by Bochkanov Sergey *************************************************************************/ -
    class pspline2interpolant -{ -}; +
    void alglib::rbfgridcalc2vsubset( + rbfmodel s, + real_1d_array x0, + ae_int_t n0, + real_1d_array x1, + ae_int_t n1, + boolean_1d_array flagy, + real_1d_array& y, + const xparams _params = alglib::xdefault);
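A sketch of the FlagY bitmap usage: only the two diagonal nodes of a 2x2 grid are requested. Builder calls marked "assumed" (and the header name) are assumptions.

    #include <cstdio>
    #include "interpolation.h"   // assumed header
    using namespace alglib;

    int main()
    {
        rbfmodel model;
        rbfcreate(2, 1, model);
        real_2d_array xy = "[[0,0,1],[1,0,2],[0,1,3],[1,1,4]]";
        rbfsetpoints(model, xy);                      // assumed overload
        rbfsetalgohierarchical(model, 1.0, 3, 0.0);   // assumed signature
        rbfreport rep;
        rbfbuildmodel(model, rep);

        // 2x2 grid, FlagY[i0+i1*n0]: request only (0,0) and (1,1)
        real_1d_array x0 = "[0.0,1.0]", x1 = "[0.0,1.0]", y;
        boolean_1d_array flags;
        flags.setlength(4);
        flags[0] = true;  flags[1] = false;
        flags[2] = false; flags[3] = true;
        rbfgridcalc2vsubset(model, x0, 2, x1, 2, flags, y);
        // unrequested entries may be left zero or may still be computed (see note above)
        printf("f(0,0)=%.4f  f(1,1)=%.4f\n", y[0], y[3]);
        return 0;
    }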
    - +
     
    /************************************************************************* -Parametric spline inteprolant: 3-dimensional curve. +This function calculates values of the RBF model at the regular grid, +which has N0*N1*N2 points, with Point[I,J,K] = (X0[I], X1[J], X2[K]). +Vector-valued RBF models are supported. -You should not try to access its members directly - use PSpline3XXXXXXXX() -functions instead. +This function returns 0.0 when: +* model is not initialized +* NX<>3 + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +NOTE: Parallel processing is implemented only for modern (hierarchical) + RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still + processed serially. + +INPUT PARAMETERS: + S - RBF model, used in read-only mode, can be shared between + multiple invocations of this function from multiple + threads. + + X0 - array of grid nodes, first coordinates, array[N0]. + Must be ordered by ascending. Exception is generated + if the array is not correctly ordered. + N0 - grid size (number of nodes) in the first dimension + + X1 - array of grid nodes, second coordinates, array[N1] + Must be ordered by ascending. Exception is generated + if the array is not correctly ordered. + N1 - grid size (number of nodes) in the second dimension + + X2 - array of grid nodes, third coordinates, array[N2] + Must be ordered by ascending. Exception is generated + if the array is not correctly ordered. + N2 - grid size (number of nodes) in the third dimension + +OUTPUT PARAMETERS: + Y - function values, array[NY*N0*N1*N2], where NY is a number + of "output" vector values (this function supports vector- + valued RBF models). Y is out-variable and is reallocated + by this function. + Y[K+NY*(I0+I1*N0+I2*N0*N1)]=F_k(X0[I0],X1[I1],X2[I2]), for: + * K=0...NY-1 + * I0=0...N0-1 + * I1=0...N1-1 + * I2=0...N2-1 + +NOTE: this function supports weakly ordered grid nodes, i.e. you may have + X[i]=X[i+1] for some i. It does not provide you any performance + benefits due to duplication of points, just convenience and + flexibility. + +NOTE: this function is re-entrant, i.e. you may use same rbfmodel + structure in multiple threads calling this function for different + grids. + +NOTE: if you need function values on some subset of regular grid, which + may be described as "several compact and dense islands", you may + use rbfgridcalc3vsubset(). + + -- ALGLIB -- + Copyright 04.03.2016 by Bochkanov Sergey *************************************************************************/ -
    class pspline3interpolant -{ -}; +
    void alglib::rbfgridcalc3v( + rbfmodel s, + real_1d_array x0, + ae_int_t n0, + real_1d_array x1, + ae_int_t n1, + real_1d_array x2, + ae_int_t n2, + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine fits piecewise linear curve to points with Ramer-Douglas- -Peucker algorithm. This function performs PARAMETRIC fit, i.e. it can be -used to fit curves like circles. +This function calculates values of the RBF model at some subset of regular +grid: +* grid has N0*N1*N2 points, with Point[I,J,K] = (X0[I], X1[J], X2[K]) +* only values at some subset of this grid are required +Vector-valued RBF models are supported. -On input it accepts dataset which describes parametric multidimensional -curve X(t), with X being vector, and t taking values in [0,N), where N is -a number of points in dataset. As result, it returns reduced dataset X2, -which can be used to build parametric curve X2(t), which approximates -X(t) with desired precision (or has specified number of sections). +This function returns 0.0 when: +* model is not initialized +* NX<>3 + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. +NOTE: Parallel processing is implemented only for modern (hierarchical) + RBFs. Legacy version 1 RBFs (created by QNN or RBF-ML) are still + processed serially. INPUT PARAMETERS: - X - array of multidimensional points: - * at least N elements, leading N elements are used if more - than N elements were specified - * order of points is IMPORTANT because it is parametric - fit - * each row of array is one point which has D coordinates - N - number of elements in X - D - number of dimensions (elements per row of X) - StopM - stopping condition - desired number of sections: - * at most M sections are generated by this function - * less than M sections can be generated if we have N<M - (or some X are non-distinct). - * zero StopM means that algorithm does not stop after - achieving some pre-specified section count - StopEps - stopping condition - desired precision: - * algorithm stops after error in each section is at most Eps - * zero Eps means that algorithm does not stop after - achieving some pre-specified precision + S - RBF model, used in read-only mode, can be shared between + multiple invocations of this function from multiple + threads. -OUTPUT PARAMETERS: - X2 - array of corner points for piecewise approximation, - has length NSections+1 or zero (for NSections=0). - Idx2 - array of indexes (parameter values): - * has length NSections+1 or zero (for NSections=0). - * each element of Idx2 corresponds to same-numbered - element of X2 - * each element of Idx2 is index of corresponding element - of X2 at original array X, i.e. I-th row of X2 is - Idx2[I]-th row of X. - * elements of Idx2 can be treated as parameter values - which should be used when building new parametric curve - * Idx2[0]=0, Idx2[NSections]=N-1 - NSections- number of sections found by algorithm, NSections<=M, - NSections can be zero for degenerate datasets - (N<=1 or all X[] are non-distinct). + X0 - array of grid nodes, first coordinates, array[N0]. + Must be ordered by ascending. Exception is generated + if the array is not correctly ordered. 
+ N0 - grid size (number of nodes) in the first dimension -NOTE: algorithm stops after: - a) dividing curve into StopM sections - b) achieving required precision StopEps - c) dividing curve into N-1 sections - If both StopM and StopEps are non-zero, algorithm is stopped by the - FIRST criterion which is satisfied. In case both StopM and StopEps - are zero, algorithm stops because of (c). + X1 - array of grid nodes, second coordinates, array[N1] + Must be ordered by ascending. Exception is generated + if the array is not correctly ordered. + N1 - grid size (number of nodes) in the second dimension + + X2 - array of grid nodes, third coordinates, array[N2] + Must be ordered by ascending. Exception is generated + if the array is not correctly ordered. + N2 - grid size (number of nodes) in the third dimension + + FlagY - array[N0*N1*N2]: + * Y[I0+I1*N0+I2*N0*N1] corresponds to node (X0[I0],X1[I1],X2[I2]) + * it is a "bitmap" array which contains False for nodes + which are NOT calculated, and True for nodes which are + required. + +OUTPUT PARAMETERS: + Y - function values, array[NY*N0*N1*N2], where NY is a number + of "output" vector values (this function supports vector- + valued RBF models): + * Y[K+NY*(I0+I1*N0+I2*N0*N1)]=F_k(X0[I0],X1[I1],X2[I2]), + for K=0...NY-1, I0=0...N0-1, I1=0...N1-1, I2=0...N2-1. + * elements of Y[] which correspond to FlagY[]=True are + loaded by model values (which may be exactly zero for + some nodes). + * elements of Y[] which correspond to FlagY[]=False MAY be + initialized by zeros OR may be calculated. This function + processes grid as a hierarchy of nested blocks and + micro-rows. If just one element of micro-row is required, + entire micro-row (up to 8 nodes in the current version, + but no promises) is calculated. + +NOTE: this function supports weakly ordered grid nodes, i.e. you may have + X[i]=X[i+1] for some i. It does not provide you any performance + benefits due to duplication of points, just convenience and + flexibility. + +NOTE: this function is re-entrant, i.e. you may use same rbfmodel + structure in multiple threads calling this function for different + grids. -- ALGLIB -- - Copyright 02.10.2014 by Bochkanov Sergey + Copyright 04.03.2016 by Bochkanov Sergey *************************************************************************/ -
    void alglib::parametricrdpfixed( - real_2d_array x, - ae_int_t n, - ae_int_t d, - ae_int_t stopm, - double stopeps, - real_2d_array& x2, - integer_1d_array& idx2, - ae_int_t& nsections); +
    void alglib::rbfgridcalc3vsubset( + rbfmodel s, + real_1d_array x0, + ae_int_t n0, + real_1d_array x1, + ae_int_t n1, + real_1d_array x2, + ae_int_t n2, + boolean_1d_array flagy, + real_1d_array& y, + const xparams _params = alglib::xdefault);
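The call sequence below is a minimal sketch of rbfgridcalc3vsubset() usage, assuming a scalar 3-dimensional model built with the hierarchical algorithm; the dataset, the 2x2x2 grid and the flag pattern are illustrative and are not taken from the manual's own examples.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // Build a scalar model over a 3-dimensional argument; dataset columns
    // are x0, x1, x2 and the function value.
    rbfmodel model;
    rbfreport rep;
    real_2d_array xy = "[[0,0,0,1],[1,1,1,2]]";
    rbfcreate(3, 1, model);
    rbfsetpoints(model, xy);
    rbfsetalgohierarchical(model, 1.0, 3, 0.0);
    rbfbuildmodel(model, rep);

    // Regular 2x2x2 grid: Point[I,J,K] = (x0[I], x1[J], x2[K]).
    real_1d_array x0 = "[0,1]";
    real_1d_array x1 = "[0,1]";
    real_1d_array x2 = "[0,1]";

    // FlagY uses the same I0+I1*N0+I2*N0*N1 ordering as the output array:
    // request values only at nodes (0,0,0) and (1,1,1).
    boolean_1d_array flagy = "[true,false,false,false,false,false,false,true]";

    real_1d_array y;
    rbfgridcalc3vsubset(model, x0, 2, x1, 2, x2, 2, flagy, y);

    // Only the flagged entries of y are guaranteed to hold model values;
    // the remaining entries may stay zero or may be computed as a side
    // effect of the block/micro-row processing described above.
    printf("%.2f %.2f\n", double(y[0]), double(y[7]));
    return 0;
}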
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function calculates arc length, i.e. length of curve between t=a -and t=b. +This function is used to peek into hierarchical RBF construction process +from some other thread and get current progress indicator. It returns +value in [0,1]. + +IMPORTANT: only HRBFs (hierarchical RBFs) support peeking into progress + indicator. Legacy RBF-ML and RBF-QNN do not support it. You + will always get 0 value. INPUT PARAMETERS: - P - parametric spline interpolant - A,B - parameter values corresponding to arc ends: - * B>A will result in positive length returned - * B<A will result in negative length returned + S - RBF model object RESULT: - length of arc starting at T=A and ending at T=B. + progress value, in [0,1] - - -- ALGLIB PROJECT -- - Copyright 30.05.2010 by Bochkanov Sergey + -- ALGLIB -- + Copyright 17.11.2018 by Bochkanov Sergey *************************************************************************/ -
    double alglib::pspline2arclength( - pspline2interpolant p, - double a, - double b); +
    double alglib::rbfpeekprogress( + rbfmodel s, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function builds non-periodic 2-dimensional parametric spline which -starts at (X[0],Y[0]) and ends at (X[N-1],Y[N-1]). +This function is used to submit a request for termination of the +hierarchical RBF construction process from some other thread. As result, +RBF construction is terminated smoothly (with proper deallocation of all +necessary resources) and resultant model is filled by zeros. -INPUT PARAMETERS: - XY - points, array[0..N-1,0..1]. - XY[I,0:1] corresponds to the Ith point. - Order of points is important! - N - points count, N>=5 for Akima splines, N>=2 for other types of - splines. - ST - spline type: - * 0 Akima spline - * 1 parabolically terminated Catmull-Rom spline (Tension=0) - * 2 parabolically terminated cubic spline - PT - parameterization type: - * 0 uniform - * 1 chord length - * 2 centripetal +A rep.terminationtype=8 will be returned upon receiving such request. -OUTPUT PARAMETERS: - P - parametric spline interpolant +IMPORTANT: only HRBFs (hierarchical RBFs) support termination requests. + Legacy RBF-ML and RBF-QNN do not support it. An attempt to + terminate their construction will be ignored. +IMPORTANT: termination request flag is cleared when the model construction + starts. Thus, any pre-construction termination requests will be + silently ignored - only ones submitted AFTER construction has + actually began will be handled. -NOTES: -* this function assumes that there all consequent points are distinct. - I.e. (x0,y0)<>(x1,y1), (x1,y1)<>(x2,y2), (x2,y2)<>(x3,y3) and so on. - However, non-consequent points may coincide, i.e. we can have (x0,y0)= - =(x2,y2). +INPUT PARAMETERS: + S - RBF model object - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey + -- ALGLIB -- + Copyright 17.11.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline2build( - real_2d_array xy, - ae_int_t n, - ae_int_t st, - ae_int_t pt, - pspline2interpolant& p); +
    void alglib::rbfrequesttermination( + rbfmodel s, + const xparams _params = alglib::xdefault);
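Both rbfpeekprogress() and rbfrequesttermination() are meant to be called from a thread other than the one running rbfbuildmodel(). The sketch below assumes C++11 std::thread and a hierarchical model; with this tiny dataset the build finishes long before the timeout branch is taken, so the termination request is shown only as the pattern one would use for a long-running build.

#include <stdio.h>
#include <atomic>
#include <thread>
#include <chrono>
#include "interpolation.h"

using namespace alglib;

int main()
{
    rbfmodel model;
    rbfreport rep;
    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    rbfcreate(2, 1, model);
    rbfsetpoints(model, xy);
    rbfsetalgohierarchical(model, 1.0, 3, 0.0);

    // Build the model in a worker thread, monitor it from this one.
    std::atomic<bool> done(false);
    std::thread worker([&]() { rbfbuildmodel(model, rep); done = true; });

    auto t0 = std::chrono::steady_clock::now();
    while( !done )
    {
        // Progress is in [0,1]; legacy QNN/RBF-ML models always report 0.
        printf("progress: %3.0f%%\n", 100.0*rbfpeekprogress(model));

        // If the build takes too long, ask it to stop smoothly; the model
        // is then filled by zeros and rep.terminationtype becomes 8.
        if( std::chrono::steady_clock::now()-t0 > std::chrono::seconds(10) )
            rbfrequesttermination(model);

        std::this_thread::sleep_for(std::chrono::milliseconds(50));
    }
    worker.join();
    printf("terminationtype: %d\n", int(rep.terminationtype));
    return 0;
}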
    - +
     
    /************************************************************************* -This function builds periodic 2-dimensional parametric spline which -starts at (X[0],Y[0]), goes through all points to (X[N-1],Y[N-1]) and then -back to (X[0],Y[0]). +This function serializes data structure to string. + +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. + But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. +*************************************************************************/ +
    void rbfserialize(rbfmodel &obj, std::string &s_out); +void rbfserialize(rbfmodel &obj, std::ostream &s_out); +
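The string-based overload is exercised by the serialization example further below; the short sketch here only shows the std::ostream/std::istream overloads declared above, using a std::stringstream (a file stream would be used the same way). The model setup is the same illustrative two-point dataset used throughout this chapter.

#include <stdio.h>
#include <sstream>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // Build a small model so that there is something to serialize.
    rbfmodel model0, model1;
    rbfreport rep;
    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    rbfcreate(2, 1, model0);
    rbfsetpoints(model0, xy);
    rbfsetalgohierarchical(model0, 1.0, 3, 0.0);
    rbfbuildmodel(model0, rep);

    // Stream overloads: write to any std::ostream and read back from any
    // std::istream.
    std::stringstream ss;
    rbfserialize(model0, ss);
    rbfunserialize(ss, model1);

    // Both models evaluate identically.
    printf("%.2f %.2f\n",
           double(rbfcalc2(model0, 0.0, 0.0)),
           double(rbfcalc2(model1, 0.0, 0.0)));
    return 0;
}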
    + +
    +
    /************************************************************************* +This function sets RBF interpolation algorithm. ALGLIB supports several +RBF algorithms with different properties. + +This algorithm is called Hierarchical RBF. It similar to its previous +incarnation, RBF-ML, i.e. it also builds a sequence of models with +decreasing radii. However, it uses more economical way of building upper +layers (ones with large radii), which results in faster model construction +and evaluation, as well as smaller memory footprint during construction. + +This algorithm has following important features: +* ability to handle millions of points +* controllable smoothing via nonlinearity penalization +* support for NX-dimensional models with NX=1 or NX>3 (unlike QNN or RBF-ML) +* support for specification of per-dimensional radii via scale vector, + which is set by means of rbfsetpointsandscales() function. This feature + is useful if you solve spatio-temporal interpolation problems, where + different radii are required for spatial and temporal dimensions. + +Running times are roughly proportional to: +* N*log(N)*NLayers - for model construction +* N*NLayers - for model evaluation +You may see that running time does not depend on search radius or points +density, just on number of layers in the hierarchy. + +IMPORTANT: this model construction algorithm was introduced in ALGLIB 3.11 + and produces models which are INCOMPATIBLE with previous + versions of ALGLIB. You can not unserialize models produced + with this function in ALGLIB 3.10 or earlier. INPUT PARAMETERS: - XY - points, array[0..N-1,0..1]. - XY[I,0:1] corresponds to the Ith point. - XY[N-1,0:1] must be different from XY[0,0:1]. - Order of points is important! - N - points count, N>=3 for other types of splines. - ST - spline type: - * 1 Catmull-Rom spline (Tension=0) with cyclic boundary conditions - * 2 cubic spline with cyclic boundary conditions - PT - parameterization type: - * 0 uniform - * 1 chord length - * 2 centripetal + S - RBF model, initialized by rbfcreate() call + RBase - RBase parameter, RBase>0 + NLayers - NLayers parameter, NLayers>0, recommended value to start + with - about 5. + LambdaNS- >=0, nonlinearity penalty coefficient, negative values are + not allowed. This parameter adds controllable smoothing to + the problem, which may reduce noise. Specification of non- + zero lambda means that in addition to fitting error solver + will also minimize LambdaNS*|S''(x)|^2 (appropriately + generalized to multiple dimensions. + + Specification of exactly zero value means that no penalty + is added (we do not even evaluate matrix of second + derivatives which is necessary for smoothing). + + Calculation of nonlinearity penalty is costly - it results + in several-fold increase of model construction time. + Evaluation time remains the same. + + Optimal lambda is problem-dependent and requires trial + and error. Good value to start from is 1e-5...1e-6, + which corresponds to slightly noticeable smoothing of the + function. Value 1e-2 usually means that quite heavy + smoothing is applied. -OUTPUT PARAMETERS: - P - parametric spline interpolant +TUNING ALGORITHM +In order to use this algorithm you have to choose three parameters: +* initial radius RBase +* number of layers in the model NLayers +* penalty coefficient LambdaNS -NOTES: -* this function assumes that there all consequent points are distinct. - I.e. (x0,y0)<>(x1,y1), (x1,y1)<>(x2,y2), (x2,y2)<>(x3,y3) and so on. - However, non-consequent points may coincide, i.e. 
we can have (x0,y0)= - =(x2,y2). -* last point of sequence is NOT equal to the first point. You shouldn't - make curve "explicitly periodic" by making them equal. +Initial radius is easy to choose - you can pick any number several times +larger than the average distance between points. Algorithm won't break +down if you choose radius which is too large (model construction time will +increase, but model will be built correctly). - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey +Choose such number of layers that RLast=RBase/2^(NLayers-1) (radius used +by the last layer) will be smaller than the typical distance between +points. In case model error is too large, you can increase number of +layers. Having more layers will make model construction and evaluation +proportionally slower, but it will allow you to have model which precisely +fits your data. From the other side, if you want to suppress noise, you +can DECREASE number of layers to make your model less flexible (or specify +non-zero LambdaNS). + +TYPICAL ERRORS + +1. Using too small number of layers - RBF models with large radius are not + flexible enough to reproduce small variations in the target function. + You need many layers with different radii, from large to small, in + order to have good model. + +2. Using initial radius which is too small. You will get model with + "holes" in the areas which are too far away from interpolation centers. + However, algorithm will work correctly (and quickly) in this case. + + -- ALGLIB -- + Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline2buildperiodic( - real_2d_array xy, - ae_int_t n, - ae_int_t st, - ae_int_t pt, - pspline2interpolant& p); +
    void alglib::rbfsetalgohierarchical( + rbfmodel s, + double rbase, + ae_int_t nlayers, + double lambdans, + const xparams _params = alglib::xdefault);
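The tuning notes above boil down to one relation: the last layer's radius is RLast = RBase/2^(NLayers-1) and should drop below the typical distance between points. The helper below is purely illustrative (choose_nlayers is not an ALGLIB function); it simply derives NLayers from an RBase picked a few times larger than the point spacing.

#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

// Illustrative helper (not part of ALGLIB): smallest NLayers such that
// RLast = RBase/2^(NLayers-1) does not exceed the typical point spacing.
static ae_int_t choose_nlayers(double rbase, double spacing)
{
    ae_int_t nlayers = 1;
    while( rbase/pow(2.0, double(nlayers-1)) > spacing && nlayers < 30 )
        nlayers++;
    return nlayers;
}

int main()
{
    // Suppose the points are roughly 0.5 units apart: pick RBase a few
    // times larger than that, then derive NLayers from the relation above.
    double rbase = 5.0;
    ae_int_t nlayers = choose_nlayers(rbase, 0.5);   // 5/2^4 = 0.3125 <= 0.5, so 5 layers

    rbfmodel model;
    rbfreport rep;
    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    rbfcreate(2, 1, model);
    rbfsetpoints(model, xy);
    rbfsetalgohierarchical(model, rbase, nlayers, 0.0);
    rbfbuildmodel(model, rep);
    printf("nlayers=%d, terminationtype=%d\n", int(nlayers), int(rep.terminationtype));
    return 0;
}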
    - +

    Examples:   [1]  [2]  [3]  

    +
     
    /************************************************************************* -This function calculates the value of the parametric spline for a given -value of parameter T +DEPRECATED:since version 3.11 ALGLIB includes new RBF model construction + algorithm, Hierarchical RBF. This algorithm is faster and + requires less memory than QNN and RBF-ML. It is especially good + for large-scale interpolation problems. So, we recommend you to + consider Hierarchical RBF as default option. + +========================================================================== + +This function sets RBF interpolation algorithm. ALGLIB supports several +RBF algorithms with different properties. + +This algorithm is called RBF-ML. It builds multilayer RBF model, i.e. +model with subsequently decreasing radii, which allows us to combine +smoothness (due to large radii of the first layers) with exactness (due +to small radii of the last layers) and fast convergence. + +Internally RBF-ML uses many different means of acceleration, from sparse +matrices to KD-trees, which results in algorithm whose working time is +roughly proportional to N*log(N)*Density*RBase^2*NLayers, where N is a +number of points, Density is an average density if points per unit of the +interpolation space, RBase is an initial radius, NLayers is a number of +layers. + +RBF-ML is good for following kinds of interpolation problems: +1. "exact" problems (perfect fit) with well separated points +2. least squares problems with arbitrary distribution of points (algorithm + gives perfect fit where it is possible, and resorts to least squares + fit in the hard areas). +3. noisy problems where we want to apply some controlled amount of + smoothing. INPUT PARAMETERS: - P - parametric spline interpolant - T - point: - * T in [0,1] corresponds to interval spanned by points - * for non-periodic splines T<0 (or T>1) correspond to parts of - the curve before the first (after the last) point - * for periodic splines T<0 (or T>1) are projected into [0,1] - by making T=T-floor(T). + S - RBF model, initialized by RBFCreate() call + RBase - RBase parameter, RBase>0 + NLayers - NLayers parameter, NLayers>0, recommended value to start + with - about 5. + LambdaV - regularization value, can be useful when solving problem + in the least squares sense. Optimal lambda is problem- + dependent and require trial and error. In our experience, + good lambda can be as large as 0.1, and you can use 0.001 + as initial guess. + Default value - 0.01, which is used when LambdaV is not + given. You can specify zero value, but it is not + recommended to do so. -OUTPUT PARAMETERS: - X - X-position - Y - Y-position +TUNING ALGORITHM +In order to use this algorithm you have to choose three parameters: +* initial radius RBase +* number of layers in the model NLayers +* regularization coefficient LambdaV - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey +Initial radius is easy to choose - you can pick any number several times +larger than the average distance between points. Algorithm won't break +down if you choose radius which is too large (model construction time will +increase, but model will be built correctly). + +Choose such number of layers that RLast=RBase/2^(NLayers-1) (radius used +by the last layer) will be smaller than the typical distance between +points. In case model error is too large, you can increase number of +layers. 
Having more layers will make model construction and evaluation +proportionally slower, but it will allow you to have model which precisely +fits your data. From the other side, if you want to suppress noise, you +can DECREASE number of layers to make your model less flexible. + +Regularization coefficient LambdaV controls smoothness of the individual +models built for each layer. We recommend you to use default value in case +you don't want to tune this parameter, because having non-zero LambdaV +accelerates and stabilizes internal iterative algorithm. In case you want +to suppress noise you can use LambdaV as additional parameter (larger +value = more smoothness) to tune. + +TYPICAL ERRORS + +1. Using initial radius which is too large. Memory requirements of the + RBF-ML are roughly proportional to N*Density*RBase^2 (where Density is + an average density of points per unit of the interpolation space). In + the extreme case of the very large RBase we will need O(N^2) units of + memory - and many layers in order to decrease radius to some reasonably + small value. + +2. Using too small number of layers - RBF models with large radius are not + flexible enough to reproduce small variations in the target function. + You need many layers with different radii, from large to small, in + order to have good model. + +3. Using initial radius which is too small. You will get model with + "holes" in the areas which are too far away from interpolation centers. + However, algorithm will work correctly (and quickly) in this case. + +4. Using too many layers - you will get too large and too slow model. This + model will perfectly reproduce your function, but maybe you will be + able to achieve similar results with less layers (and less memory). + + -- ALGLIB -- + Copyright 02.03.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline2calc( - pspline2interpolant p, - double t, - double& x, - double& y); +
    void alglib::rbfsetalgomultilayer( + rbfmodel s, + double rbase, + ae_int_t nlayers, + const xparams _params = alglib::xdefault); +void alglib::rbfsetalgomultilayer( + rbfmodel s, + double rbase, + ae_int_t nlayers, + double lambdav, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function calculates derivative, i.e. it returns (dX/dT,dY/dT). +DEPRECATED:since version 3.11 ALGLIB includes new RBF model construction + algorithm, Hierarchical RBF. This algorithm is faster and + requires less memory than QNN and RBF-ML. It is especially good + for large-scale interpolation problems. So, we recommend you to + consider Hierarchical RBF as default option. + +========================================================================== + +This function sets RBF interpolation algorithm. ALGLIB supports several +RBF algorithms with different properties. + +This algorithm is called RBF-QNN and it is good for point sets with +following properties: +a) all points are distinct +b) all points are well separated. +c) points distribution is approximately uniform. There is no "contour + lines", clusters of points, or other small-scale structures. + +Algorithm description: +1) interpolation centers are allocated to data points +2) interpolation radii are calculated as distances to the nearest centers + times Q coefficient (where Q is a value from [0.75,1.50]). +3) after performing (2) radii are transformed in order to avoid situation + when single outlier has very large radius and influences many points + across all dataset. Transformation has following form: + new_r[i] = min(r[i],Z*median(r[])) + where r[i] is I-th radius, median() is a median radius across entire + dataset, Z is user-specified value which controls amount of deviation + from median radius. + +When (a) is violated, we will be unable to build RBF model. When (b) or +(c) are violated, model will be built, but interpolation quality will be +low. See http://www.alglib.net/interpolation/ for more information on this +subject. + +This algorithm is used by default. + +Additional Q parameter controls smoothness properties of the RBF basis: +* Q<0.75 will give perfectly conditioned basis, but terrible smoothness + properties (RBF interpolant will have sharp peaks around function values) +* Q around 1.0 gives good balance between smoothness and condition number +* Q>1.5 will lead to badly conditioned systems and slow convergence of the + underlying linear solver (although smoothness will be very good) +* Q>2.0 will effectively make optimizer useless because it won't converge + within reasonable amount of iterations. It is possible to set such large + Q, but it is advised not to do so. INPUT PARAMETERS: - P - parametric spline interpolant - T - point: - * T in [0,1] corresponds to interval spanned by points - * for non-periodic splines T<0 (or T>1) correspond to parts of - the curve before the first (after the last) point - * for periodic splines T<0 (or T>1) are projected into [0,1] - by making T=T-floor(T). + S - RBF model, initialized by RBFCreate() call + Q - Q parameter, Q>0, recommended value - 1.0 + Z - Z parameter, Z>0, recommended value - 5.0 -OUTPUT PARAMETERS: - X - X-value - DX - X-derivative - Y - Y-value - DY - Y-derivative +NOTE: this function has some serialization-related subtleties. We + recommend you to study serialization examples from ALGLIB Reference + Manual if you want to perform serialization of your models. - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline2diff( - pspline2interpolant p, - double t, - double& x, - double& dx, - double& y, - double& dy); +
    void alglib::rbfsetalgoqnn( + rbfmodel s, + const xparams _params = alglib::xdefault); +void alglib::rbfsetalgoqnn( + rbfmodel s, + double q, + double z, + const xparams _params = alglib::xdefault);
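Both constructors above are deprecated in favour of rbfsetalgohierarchical(), so the following sketch exists only to show what legacy code that still selects them looks like; the parameter values are the defaults and recommendations quoted in the text, not tuning advice.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    rbfmodel model;
    rbfreport rep;
    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    rbfcreate(2, 1, model);
    rbfsetpoints(model, xy);

    // Legacy QNN constructor (deprecated): default Q=1.0, Z=5.0 ...
    rbfsetalgoqnn(model);
    rbfbuildmodel(model, rep);
    printf("%d\n", int(rep.terminationtype));

    // ... or the legacy multilayer constructor (deprecated):
    // RBase=1.0, three layers, LambdaV=0.01.
    rbfsetalgomultilayer(model, 1.0, 3, 0.01);
    rbfbuildmodel(model, rep);
    printf("%d\n", int(rep.terminationtype));
    return 0;
}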
    - +
     
    /************************************************************************* -This function calculates first and second derivative with respect to T. +This function sets constant term (model is a sum of radial basis functions +plus constant). This function won't have effect until next call to +RBFBuildModel(). INPUT PARAMETERS: - P - parametric spline interpolant - T - point: - * T in [0,1] corresponds to interval spanned by points - * for non-periodic splines T<0 (or T>1) correspond to parts of - the curve before the first (after the last) point - * for periodic splines T<0 (or T>1) are projected into [0,1] - by making T=T-floor(T). + S - RBF model, initialized by RBFCreate() call -OUTPUT PARAMETERS: - X - X-value - DX - derivative - D2X - second derivative - Y - Y-value - DY - derivative - D2Y - second derivative +NOTE: this function has some serialization-related subtleties. We + recommend you to study serialization examples from ALGLIB Reference + Manual if you want to perform serialization of your models. + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::rbfsetconstterm( + rbfmodel s, + const xparams _params = alglib::xdefault); - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This function sets linear term (model is a sum of radial basis functions +plus linear polynomial). This function won't have effect until next call +to RBFBuildModel(). + +INPUT PARAMETERS: + S - RBF model, initialized by RBFCreate() call + +NOTE: this function has some serialization-related subtleties. We + recommend you to study serialization examples from ALGLIB Reference + Manual if you want to perform serialization of your models. + + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline2diff2( - pspline2interpolant p, - double t, - double& x, - double& dx, - double& d2x, - double& y, - double& dy, - double& d2y); +
    void alglib::rbfsetlinterm( + rbfmodel s, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function returns vector of parameter values correspoding to points. +This function adds dataset. + +This function overrides results of the previous calls, i.e. multiple calls +of this function will result in only the last set being added. -I.e. for P created from (X[0],Y[0])...(X[N-1],Y[N-1]) and U=TValues(P) we -have - (X[0],Y[0]) = PSpline2Calc(P,U[0]), - (X[1],Y[1]) = PSpline2Calc(P,U[1]), - (X[2],Y[2]) = PSpline2Calc(P,U[2]), - ... +IMPORTANT: ALGLIB version 3.11 and later allows you to specify a set of + per-dimension scales. Interpolation radii are multiplied by the + scale vector. It may be useful if you have mixed spatio-temporal + data (say, a set of 3D slices recorded at different times). + You should call rbfsetpointsandscales() function to use this + feature. INPUT PARAMETERS: - P - parametric spline interpolant + S - RBF model, initialized by rbfcreate() call. + XY - points, array[N,NX+NY]. One row corresponds to one point + in the dataset. First NX elements are coordinates, next + NY elements are function values. Array may be larger than + specified, in this case only leading [N,NX+NY] elements + will be used. + N - number of points in the dataset -OUTPUT PARAMETERS: - N - array size - T - array[0..N-1] +After you've added dataset and (optionally) tuned algorithm settings you +should call rbfbuildmodel() in order to build a model for you. +NOTE: dataset added by this function is not saved during model serialization. + MODEL ITSELF is serialized, but data used to build it are not. -NOTES: -* for non-periodic splines U[0]=0, U[0]<U[1]<...<U[N-1], U[N-1]=1 -* for periodic splines U[0]=0, U[0]<U[1]<...<U[N-1], U[N-1]<1 + So, if you 1) add dataset to empty RBF model, 2) serialize and + unserialize it, then you will get an empty RBF model with no dataset + being attached. + + From the other side, if you call rbfbuildmodel() between (1) and (2), + then after (2) you will get your fully constructed RBF model - but + again with no dataset attached, so subsequent calls to rbfbuildmodel() + will produce empty model. - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey + + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline2parametervalues( - pspline2interpolant p, - ae_int_t& n, - real_1d_array& t); +
    void alglib::rbfsetpoints( + rbfmodel s, + real_2d_array xy, + const xparams _params = alglib::xdefault); +void alglib::rbfsetpoints( + rbfmodel s, + real_2d_array xy, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  [3]  

    +
     
    /************************************************************************* -This function calculates tangent vector for a given value of parameter T +This function adds dataset and a vector of per-dimension scales. + +It may be useful if you have mixed spatio-temporal data - say, a set of 3D +slices recorded at different times. Such data typically require different +RBF radii for spatial and temporal dimensions. ALGLIB solves this problem +by specifying single RBF radius, which is (optionally) multiplied by the +scale vector. + +This function overrides results of the previous calls, i.e. multiple calls +of this function will result in only the last set being added. + +IMPORTANT: only HierarchicalRBF algorithm can work with scaled points. So, + using this function results in RBF models which can be used in + ALGLIB 3.11 or later. Previous versions of the library will be + unable to unserialize models produced by HierarchicalRBF algo. + + Any attempt to use this function with RBF-ML or QNN algorithms + will result in -3 error code being returned (incorrect + algorithm). INPUT PARAMETERS: - P - parametric spline interpolant - T - point: - * T in [0,1] corresponds to interval spanned by points - * for non-periodic splines T<0 (or T>1) correspond to parts of - the curve before the first (after the last) point - * for periodic splines T<0 (or T>1) are projected into [0,1] - by making T=T-floor(T). + R - RBF model, initialized by rbfcreate() call. + XY - points, array[N,NX+NY]. One row corresponds to one point + in the dataset. First NX elements are coordinates, next + NY elements are function values. Array may be larger than + specified, in this case only leading [N,NX+NY] elements + will be used. + N - number of points in the dataset + S - array[NX], scale vector, S[i]>0. -OUTPUT PARAMETERS: - X - X-component of tangent vector (normalized) - Y - Y-component of tangent vector (normalized) +After you've added dataset and (optionally) tuned algorithm settings you +should call rbfbuildmodel() in order to build a model for you. -NOTE: - X^2+Y^2 is either 1 (for non-zero tangent vector) or 0. +NOTE: dataset added by this function is not saved during model serialization. + MODEL ITSELF is serialized, but data used to build it are not. + So, if you 1) add dataset to empty RBF model, 2) serialize and + unserialize it, then you will get an empty RBF model with no dataset + being attached. - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey + From the other side, if you call rbfbuildmodel() between (1) and (2), + then after (2) you will get your fully constructed RBF model - but + again with no dataset attached, so subsequent calls to rbfbuildmodel() + will produce empty model. + + + -- ALGLIB -- + Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline2tangent( - pspline2interpolant p, - double t, - double& x, - double& y); +
    void alglib::rbfsetpointsandscales( + rbfmodel r, + real_2d_array xy, + real_1d_array s, + const xparams _params = alglib::xdefault); +void alglib::rbfsetpointsandscales( + rbfmodel r, + real_2d_array xy, + ae_int_t n, + real_1d_array s, + const xparams _params = alglib::xdefault);
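A sketch of the spatio-temporal case the text describes: a 3-dimensional argument (x, y, t) whose temporal axis needs a much larger radius, expressed through the scale vector. The dataset, scales and radii are illustrative; rbfcalc3() is assumed here as the usual three-argument evaluation call of the RBF unit.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // 3-dimensional argument (x, y, t); the last column is the function value.
    rbfmodel model;
    rbfreport rep;
    real_2d_array xy = "[[0,0,0,1],[1,0,0,2],[0,1,0,3],[0,0,10,4]]";

    // Per-dimension scales: spatial radii are kept as-is, the temporal
    // dimension is stretched 10x so that a single RBase covers both.
    real_1d_array s = "[1,1,10]";

    rbfcreate(3, 1, model);
    rbfsetpointsandscales(model, xy, s);

    // Scaled points require the hierarchical constructor (see note above).
    rbfsetalgohierarchical(model, 1.0, 3, 0.0);
    rbfbuildmodel(model, rep);
    printf("%d\n", int(rep.terminationtype));
    printf("%.2f\n", rbfcalc3(model, 0.0, 0.0, 0.0));
    return 0;
}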
    - +
     
    /************************************************************************* -This function calculates arc length, i.e. length of curve between t=a -and t=b. +This function sets basis function type, which can be: +* 0 for classic Gaussian +* 1 for fast and compact bell-like basis function, which becomes exactly + zero at distance equal to 3*R (default option). INPUT PARAMETERS: - P - parametric spline interpolant - A,B - parameter values corresponding to arc ends: - * B>A will result in positive length returned - * B<A will result in negative length returned - -RESULT: - length of arc starting at T=A and ending at T=B. - + S - RBF model, initialized by RBFCreate() call + BF - basis function type: + * 0 - classic Gaussian + * 1 - fast and compact one - -- ALGLIB PROJECT -- - Copyright 30.05.2010 by Bochkanov Sergey + -- ALGLIB -- + Copyright 01.02.2017 by Bochkanov Sergey *************************************************************************/ -
    double alglib::pspline3arclength( - pspline3interpolant p, - double a, - double b); +
    void alglib::rbfsetv2bf( + rbfmodel s, + ae_int_t bf, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function builds non-periodic 3-dimensional parametric spline which -starts at (X[0],Y[0],Z[0]) and ends at (X[N-1],Y[N-1],Z[N-1]). +This function sets stopping criteria of the underlying linear solver for +hierarchical (version 2) RBF constructor. -Same as PSpline2Build() function, but for 3D, so we won't duplicate its -description here. +INPUT PARAMETERS: + S - RBF model, initialized by RBFCreate() call + MaxIts - this criterion will stop algorithm after MaxIts iterations. + Typically a few hundreds iterations is required, with 400 + being a good default value to start experimentation. + Zero value means that default value will be selected. - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey + -- ALGLIB -- + Copyright 01.02.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline3build( - real_2d_array xy, - ae_int_t n, - ae_int_t st, - ae_int_t pt, - pspline3interpolant& p); +
    void alglib::rbfsetv2its( + rbfmodel s, + ae_int_t maxits, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function builds periodic 3-dimensional parametric spline which -starts at (X[0],Y[0],Z[0]), goes through all points to (X[N-1],Y[N-1],Z[N-1]) -and then back to (X[0],Y[0],Z[0]). +This function sets support radius parameter of hierarchical (version 2) +RBF constructor. -Same as PSpline2Build() function, but for 3D, so we won't duplicate its -description here. +Hierarchical RBF model achieves great speed-up by removing from the model +excessive (too dense) nodes. Say, if you have RBF radius equal to 1 meter, +and two nodes are just 1 millimeter apart, you may remove one of them +without reducing model quality. - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey +Support radius parameter is used to justify which points need removal, and +which do not. If two points are less than SUPPORT_R*CUR_RADIUS units of +distance apart, one of them is removed from the model. The larger support +radius is, the faster model construction AND evaluation are. However, +too large values result in "bumpy" models. + +INPUT PARAMETERS: + S - RBF model, initialized by RBFCreate() call + R - support radius coefficient, >=0. + Recommended values are [0.1,0.4] range, with 0.1 being + default value. + + -- ALGLIB -- + Copyright 01.02.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline3buildperiodic( - real_2d_array xy, - ae_int_t n, - ae_int_t st, - ae_int_t pt, - pspline3interpolant& p); +
    void alglib::rbfsetv2supportr( + rbfmodel s, + double r, + const xparams _params = alglib::xdefault);
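The three rbfsetv2*() calls documented above all act on the hierarchical (version 2) constructor. A short sketch combining them, using the recommended default values quoted in the text:

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    rbfmodel model;
    rbfreport rep;
    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    rbfcreate(2, 1, model);
    rbfsetpoints(model, xy);
    rbfsetalgohierarchical(model, 1.0, 3, 0.0);

    // Version-2 (hierarchical) tuning knobs:
    rbfsetv2bf(model, 1);           // compact bell-like basis function (default)
    rbfsetv2its(model, 400);        // linear solver iteration limit
    rbfsetv2supportr(model, 0.1);   // support radius coefficient (default 0.1)

    rbfbuildmodel(model, rep);
    printf("%d\n", int(rep.terminationtype));
    return 0;
}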
    - +
     
    /************************************************************************* -This function calculates the value of the parametric spline for a given -value of parameter T. +This function sets zero term (model is a sum of radial basis functions +without polynomial term). This function won't have effect until next call +to RBFBuildModel(). INPUT PARAMETERS: - P - parametric spline interpolant - T - point: - * T in [0,1] corresponds to interval spanned by points - * for non-periodic splines T<0 (or T>1) correspond to parts of - the curve before the first (after the last) point - * for periodic splines T<0 (or T>1) are projected into [0,1] - by making T=T-floor(T). - -OUTPUT PARAMETERS: - X - X-position - Y - Y-position - Z - Z-position + S - RBF model, initialized by RBFCreate() call +NOTE: this function has some serialization-related subtleties. We + recommend you to study serialization examples from ALGLIB Reference + Manual if you want to perform serialization of your models. - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline3calc( - pspline3interpolant p, - double t, - double& x, - double& y, - double& z); +
    void alglib::rbfsetzeroterm( + rbfmodel s, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function calculates derivative, i.e. it returns (dX/dT,dY/dT,dZ/dT). +This function calculates values of the RBF model at the given point, using +external buffer object (internal temporaries of RBF model are not +modified). + +This function allows to use same RBF model object in different threads, +assuming that different threads use different instances of buffer +structure. INPUT PARAMETERS: - P - parametric spline interpolant - T - point: - * T in [0,1] corresponds to interval spanned by points - * for non-periodic splines T<0 (or T>1) correspond to parts of - the curve before the first (after the last) point - * for periodic splines T<0 (or T>1) are projected into [0,1] - by making T=T-floor(T). + S - RBF model, may be shared between different threads + Buf - buffer object created for this particular instance of RBF + model with rbfcreatecalcbuffer(). + X - coordinates, array[NX]. + X may have more than NX elements, in this case only + leading NX will be used. + Y - possibly preallocated array OUTPUT PARAMETERS: - X - X-value - DX - X-derivative - Y - Y-value - DY - Y-derivative - Z - Z-value - DZ - Z-derivative - + Y - function value, array[NY]. Y is not reallocated when it + is larger than NY. - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline3diff( - pspline3interpolant p, - double t, - double& x, - double& dx, - double& y, - double& dy, - double& z, - double& dz); +
    void alglib::rbftscalcbuf( + rbfmodel s, + rbfcalcbuffer buf, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault);
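A sketch of thread-safe evaluation with per-thread buffers, assuming C++11 std::thread and the rbfcreatecalcbuffer() constructor mentioned above (its exact signature is not reproduced in this section, so the model-in/buffer-out form used here is an assumption).

#include <stdio.h>
#include <thread>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // Build a model once; afterwards it is used strictly read-only.
    rbfmodel model;
    rbfreport rep;
    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    rbfcreate(2, 1, model);
    rbfsetpoints(model, xy);
    rbfsetalgohierarchical(model, 1.0, 3, 0.0);
    rbfbuildmodel(model, rep);

    // Each thread owns its buffer, so the model's internal temporaries
    // are never shared between threads.
    auto evaluate = [&model](double x0, double x1)
    {
        rbfcalcbuffer buf;
        rbfcreatecalcbuffer(model, buf);   // assumed form: model in, buffer out
        real_1d_array x, y;
        x.setlength(2);
        x[0] = x0;
        x[1] = x1;
        rbftscalcbuf(model, buf, x, y);
        printf("f(%.1f,%.1f)=%.2f\n", x0, x1, double(y[0]));
    };

    std::thread t0(evaluate, -1.0, 0.0);
    std::thread t1(evaluate, +1.0, 0.0);
    t0.join();
    t1.join();
    return 0;
}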
    - +
     
    /************************************************************************* -This function calculates first and second derivative with respect to T. +This function "unpacks" RBF model by extracting its coefficients. INPUT PARAMETERS: - P - parametric spline interpolant - T - point: - * T in [0,1] corresponds to interval spanned by points - * for non-periodic splines T<0 (or T>1) correspond to parts of - the curve before the first (after the last) point - * for periodic splines T<0 (or T>1) are projected into [0,1] - by making T=T-floor(T). + S - RBF model OUTPUT PARAMETERS: - X - X-value - DX - derivative - D2X - second derivative - Y - Y-value - DY - derivative - D2Y - second derivative - Z - Z-value - DZ - derivative - D2Z - second derivative - + NX - dimensionality of argument + NY - dimensionality of the target function + XWR - model information, array[NC,NX+NY+1]. + One row of the array corresponds to one basis function: + * first NX columns - coordinates of the center + * next NY columns - weights, one per dimension of the + function being modelled + For ModelVersion=1: + * last column - radius, same for all dimensions of + the function being modelled + For ModelVersion=2: + * last NX columns - radii, one per dimension + NC - number of the centers + V - polynomial term , array[NY,NX+1]. One row per one + dimension of the function being modelled. First NX + elements are linear coefficients, V[NX] is equal to the + constant part. + ModelVersion-version of the RBF model: + * 1 - for models created by QNN and RBF-ML algorithms, + compatible with ALGLIB 3.10 or earlier. + * 2 - for models created by HierarchicalRBF, requires + ALGLIB 3.11 or later - -- ALGLIB PROJECT -- - Copyright 28.05.2010 by Bochkanov Sergey + -- ALGLIB -- + Copyright 13.12.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::pspline3diff2( - pspline3interpolant p, - double t, - double& x, - double& dx, - double& d2x, - double& y, - double& dy, - double& d2y, - double& z, - double& dz, - double& d2z); +
    void alglib::rbfunpack( + rbfmodel s, + ae_int_t& nx, + ae_int_t& ny, + real_2d_array& xwr, + ae_int_t& nc, + real_2d_array& v, + ae_int_t& modelversion, + const xparams _params = alglib::xdefault);
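A small sketch showing how the unpacked arrays are typically traversed; the dataset is the usual two-point example, and the comments restate the row layout described above rather than adding anything new.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    rbfmodel model;
    rbfreport rep;
    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    rbfcreate(2, 1, model);
    rbfsetpoints(model, xy);
    rbfsetalgohierarchical(model, 1.0, 3, 0.0);
    rbfbuildmodel(model, rep);

    ae_int_t nx, ny, nc, modelversion;
    real_2d_array xwr, v;
    rbfunpack(model, nx, ny, xwr, nc, v, modelversion);

    // Columns 0..nx-1 of each XWR row hold the center, the next ny columns
    // the weights; the trailing column(s) hold the radius (version 1) or
    // per-dimension radii (version 2), as described above.
    printf("nx=%d ny=%d nc=%d version=%d\n",
           int(nx), int(ny), int(nc), int(modelversion));
    for(ae_int_t i = 0; i < nc; i++)
        printf("center %d: (%.2f,%.2f), weight %.3f\n",
               int(i), xwr[i][0], xwr[i][1], xwr[i][2]);

    // V holds the polynomial term: linear coefficients first, constant last.
    printf("%s\n", v.tostring(2).c_str());
    return 0;
}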
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function returns vector of parameter values correspoding to points. +This function unserializes data structure from string. +*************************************************************************/ +
    void rbfunserialize(const std::string &s_in, rbfmodel &obj); +void rbfunserialize(const std::istream &s_in, rbfmodel &obj); +
    + +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "interpolation.h"
     
    -Same as PSpline2ParameterValues(), but for 3D.
    +using namespace alglib;
     
    -  -- ALGLIB PROJECT --
    -     Copyright 28.05.2010 by Bochkanov Sergey
    -*************************************************************************/
    -
    void alglib::pspline3parametervalues( - pspline3interpolant p, - ae_int_t& n, - real_1d_array& t); -
    - +int main(int argc, char **argv) +{ + // + // This example illustrates basic concepts of the RBF models: creation, modification, + // evaluation. + // + // Suppose that we have set of 2-dimensional points with associated + // scalar function values, and we want to build a RBF model using + // our data. + // + // NOTE: we can work with 3D models too :) + // + // Typical sequence of steps is given below: + // 1. we create RBF model object + // 2. we attach our dataset to the RBF model and tune algorithm settings + // 3. we rebuild RBF model using QNN algorithm on new data + // 4. we use RBF model (evaluate, serialize, etc.) + // + double v; + + // + // Step 1: RBF model creation. + // + // We have to specify dimensionality of the space (2 or 3) and + // dimensionality of the function (scalar or vector). + // + // New model is empty - it can be evaluated, + // but we just get zero value at any point. + // + rbfmodel model; + rbfcreate(2, 1, model); + + v = rbfcalc2(model, 0.0, 0.0); + printf("%.2f\n", double(v)); // EXPECTED: 0.000 + + // + // Step 2: we add dataset. + // + // XY contains two points - x0=(-1,0) and x1=(+1,0) - + // and two function values f(x0)=2, f(x1)=3. + // + // We added points, but model was not rebuild yet. + // If we call rbfcalc2(), we still will get 0.0 as result. + // + real_2d_array xy = "[[-1,0,2],[+1,0,3]]"; + rbfsetpoints(model, xy); + + v = rbfcalc2(model, 0.0, 0.0); + printf("%.2f\n", double(v)); // EXPECTED: 0.000 + + // + // Step 3: rebuild model + // + // After we've configured model, we should rebuild it - + // it will change coefficients stored internally in the + // rbfmodel structure. + // + // We use hierarchical RBF algorithm with following parameters: + // * RBase - set to 1.0 + // * NLayers - three layers are used (although such simple problem + // does not need more than 1 layer) + // * LambdaReg - is set to zero value, no smoothing is required + // + rbfreport rep; + rbfsetalgohierarchical(model, 1.0, 3, 0.0); + rbfbuildmodel(model, rep); + printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 + + // + // Step 4: model was built + // + // After call of rbfbuildmodel(), rbfcalc2() will return + // value of the new model. + // + v = rbfcalc2(model, 0.0, 0.0); + printf("%.2f\n", double(v)); // EXPECTED: 2.500 + return 0; +} + + +
    -
    /************************************************************************* -This function calculates tangent vector for a given value of parameter T +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -INPUT PARAMETERS: - P - parametric spline interpolant - T - point: - * T in [0,1] corresponds to interval spanned by points - * for non-periodic splines T<0 (or T>1) correspond to parts of - the curve before the first (after the last) point - * for periodic splines T<0 (or T>1) are projected into [0,1] - by making T=T-floor(T). +using namespace alglib; -OUTPUT PARAMETERS: - X - X-component of tangent vector (normalized) - Y - Y-component of tangent vector (normalized) - Z - Z-component of tangent vector (normalized) -NOTE: - X^2+Y^2+Z^2 is either 1 (for non-zero tangent vector) or 0. +int main(int argc, char **argv) +{ + // + // This example show how to work with polynomial term + // + // Suppose that we have set of 2-dimensional points with associated + // scalar function values, and we want to build a RBF model using + // our data. + // + // We use hierarchical RBF algorithm with following parameters: + // * RBase - set to 1.0 + // * NLayers - three layers are used (although such simple problem + // does not need more than 1 layer) + // * LambdaReg - is set to zero value, no smoothing is required + // + double v; + rbfmodel model; + real_2d_array xy = "[[-1,0,2],[+1,0,3]]"; + rbfreport rep; + + rbfcreate(2, 1, model); + rbfsetpoints(model, xy); + rbfsetalgohierarchical(model, 1.0, 3, 0.0); + + // + // By default, RBF model uses linear term. It means that model + // looks like + // f(x,y) = SUM(RBF[i]) + a*x + b*y + c + // where RBF[i] is I-th radial basis function and a*x+by+c is a + // linear term. Having linear terms in a model gives us: + // (1) improved extrapolation properties + // (2) linearity of the model when data can be perfectly fitted + // by the linear function + // (3) linear asymptotic behavior + // + // Our simple dataset can be modelled by the linear function + // f(x,y) = 0.5*x + 2.5 + // and rbfbuildmodel() with default settings should preserve this + // linearity. + // + ae_int_t nx; + ae_int_t ny; + ae_int_t nc; + ae_int_t modelversion; + real_2d_array xwr; + real_2d_array c; + rbfbuildmodel(model, rep); + printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 + rbfunpack(model, nx, ny, xwr, nc, c, modelversion); + printf("%s\n", c.tostring(2).c_str()); // EXPECTED: [[0.500,0.000,2.500]] + + // asymptotic behavior of our function is linear + v = rbfcalc2(model, 1000.0, 0.0); + printf("%.1f\n", double(v)); // EXPECTED: 502.50 + + // + // Instead of linear term we can use constant term. In this case + // we will get model which has form + // f(x,y) = SUM(RBF[i]) + c + // where RBF[i] is I-th radial basis function and c is a constant, + // which is equal to the average function value on the dataset. + // + // Because we've already attached dataset to the model the only + // thing we have to do is to call rbfsetconstterm() and then + // rebuild model with rbfbuildmodel(). + // + rbfsetconstterm(model); + rbfbuildmodel(model, rep); + printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 + rbfunpack(model, nx, ny, xwr, nc, c, modelversion); + printf("%s\n", c.tostring(2).c_str()); // EXPECTED: [[0.000,0.000,2.500]] + + // asymptotic behavior of our function is constant + v = rbfcalc2(model, 1000.0, 0.0); + printf("%.2f\n", double(v)); // EXPECTED: 2.500 + + // + // Finally, we can use zero term. 
Just plain RBF without polynomial + // part: + // f(x,y) = SUM(RBF[i]) + // where RBF[i] is I-th radial basis function. + // + rbfsetzeroterm(model); + rbfbuildmodel(model, rep); + printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 + rbfunpack(model, nx, ny, xwr, nc, c, modelversion); + printf("%s\n", c.tostring(2).c_str()); // EXPECTED: [[0.000,0.000,0.000]] + + // asymptotic behavior of our function is just zero constant + v = rbfcalc2(model, 1000.0, 0.0); + printf("%.2f\n", double(v)); // EXPECTED: 0.000 + return 0; +} + + +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "interpolation.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
+    // This example shows how to serialize and unserialize an RBF model
    +    // 
+    // Suppose that we have a set of 2-dimensional points with associated
+    // scalar function values, and we want to build an RBF model using
+    // our data. Then we want to serialize it to a string and unserialize it
+    // from that string, loading it into another instance of the RBF model.
    +    //
    +    // Here we assume that you already know how to create RBF models.
    +    //
    +    std::string s;
    +    double v;
    +    rbfmodel model0;
    +    rbfmodel model1;
    +    real_2d_array xy = "[[-1,0,2],[+1,0,3]]";
    +    rbfreport rep;
    +
    +    // model initialization
    +    rbfcreate(2, 1, model0);
    +    rbfsetpoints(model0, xy);
    +    rbfsetalgohierarchical(model0, 1.0, 3, 0.0);
    +    rbfbuildmodel(model0, rep);
    +    printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1
    +
    +    //
    +    // Serialization - it looks easy,
+    // but you should carefully read the next section.
    +    //
    +    alglib::rbfserialize(model0, s);
    +    alglib::rbfunserialize(s, model1);
    +
+    // both models return the same value
    +    v = rbfcalc2(model0, 0.0, 0.0);
    +    printf("%.2f\n", double(v)); // EXPECTED: 2.500
    +    v = rbfcalc2(model1, 0.0, 0.0);
    +    printf("%.2f\n", double(v)); // EXPECTED: 2.500
    +
    +    //
+    // The previous section shows that the model state is saved/restored during
+    // serialization. However, some properties are NOT serialized.
    +    //
+    // Serialization saves/restores the RBF model, but it does NOT save/restore
+    // the settings which were used to build the current model. In particular,
+    // the dataset which was used to build the model is not preserved.
    +    //
+    // What does this mean for us?
    +    //
    +    // Do you remember this sequence: rbfcreate-rbfsetpoints-rbfbuildmodel?
+    // The first step creates the model, the second step adds the dataset and
+    // tunes model settings, and the third step builds the model using the
+    // current dataset and construction settings.
    +    //
    +    // If you call rbfbuildmodel() without calling rbfsetpoints() first, you
+    // will get an empty (zero) RBF model. In our example, model0 contains the
+    // dataset which was added by the rbfsetpoints() call. However, model1 does
+    // NOT contain the dataset - because the dataset is NOT serialized.
    +    //
+    // Thus, if we call rbfbuildmodel(model0,rep), we will get the same model,
+    // which returns 2.5 at (x,y)=(0,0). However, after the same call model1 will
+    // return zero - because it contains the RBF model (coefficients), but does NOT
+    // contain the dataset which was used to build this model.
    +    //
    +    // Basically, it means that:
+    // * serialization of the RBF model preserves everything related to the model
+    //   EVALUATION
+    // * but it does NOT create a perfect copy of the original object.
    +    //
    +    rbfbuildmodel(model0, rep);
    +    v = rbfcalc2(model0, 0.0, 0.0);
    +    printf("%.2f\n", double(v)); // EXPECTED: 2.500
     
    +    rbfbuildmodel(model1, rep);
    +    v = rbfcalc2(model1, 0.0, 0.0);
    +    printf("%.2f\n", double(v)); // EXPECTED: 0.000
    +    return 0;
    +}
     
    -  -- ALGLIB PROJECT --
    -     Copyright 28.05.2010 by Bochkanov Sergey
    -*************************************************************************/
    -
    void alglib::pspline3tangent( - pspline3interpolant p, - double t, - double& x, - double& y, - double& z); -
    - +
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -38133,5801 +50036,5789 @@
     int main(int argc, char **argv)
     {
         //
    -    // We use RDP algorithm to approximate parametric 2D curve given by
    -    // locations in t=0,1,2,3 (see below), which form piecewise linear
    -    // trajectory through D-dimensional space (2-dimensional in our example).
    +    // Suppose that we have set of 2-dimensional points with associated VECTOR
    +    // function values, and we want to build a RBF model using our data.
         // 
    -    //     |
    -    //     |
    -    //     -     *     *     X2................X3
    -    //     |                .
    -    //     |               .
    -    //     -     *     *  .  *     *     *     *
    -    //     |             .
    -    //     |            .
    -    //     -     *     X1    *     *     *     *
    -    //     |      .....
    -    //     |  ....
    -    //     X0----|-----|-----|-----|-----|-----|---
    +    // Typical sequence of steps is given below:
    +    // 1. we create RBF model object
    +    // 2. we attach our dataset to the RBF model and tune algorithm settings
    +    // 3. we rebuild RBF model using new data
    +    // 4. we use RBF model (evaluate, serialize, etc.)
         //
    -    ae_int_t npoints = 4;
    -    ae_int_t ndimensions = 2;
    -    real_2d_array x = "[[0,0],[2,1],[3,3],[6,3]]";
    +    real_1d_array x;
    +    real_1d_array y;
     
         //
    -    // Approximation of parametric curve is performed by another parametric curve
    -    // with lesser amount of points. It allows to work with "compressed"
    -    // representation, which needs smaller amount of memory. Say, in our example
    -    // (we allow points with error smaller than 0.8) approximation will have
    -    // just two sequential sections connecting X0 with X2, and X2 with X3.
    -    // 
    -    //     |
    -    //     |
    -    //     -     *     *     X2................X3
    -    //     |               . 
    -    //     |             .  
    -    //     -     *     .     *     *     *     *
    -    //     |         .    
    -    //     |       .     
    -    //     -     .     X1    *     *     *     *
    -    //     |   .       
    -    //     | .    
    -    //     X0----|-----|-----|-----|-----|-----|---
    +    // Step 1: RBF model creation.
         //
    +    // We have to specify dimensionality of the space (equal to 2) and
    +    // dimensionality of the function (2-dimensional vector function).
         //
    -    real_2d_array y;
    -    integer_1d_array idxy;
    -    ae_int_t nsections;
    -    ae_int_t limitcnt = 0;
    -    double limiteps = 0.8;
    -    parametricrdpfixed(x, npoints, ndimensions, limitcnt, limiteps, y, idxy, nsections);
    -    printf("%d\n", int(nsections)); // EXPECTED: 2
    -    printf("%s\n", idxy.tostring().c_str()); // EXPECTED: [0,2,3]
    -    return 0;
    -}
    -
    -
    -
    -
    - -pcabuildbasis
    - - -
    - -
    -
    /************************************************************************* -Principal components analysis - -Subroutine builds orthogonal basis where first axis corresponds to -direction with maximum variance, second axis maximizes variance in subspace -orthogonal to first axis and so on. + // New model is empty - it can be evaluated, + // but we just get zero value at any point. + // + rbfmodel model; + rbfcreate(2, 2, model); -It should be noted that, unlike LDA, PCA does not use class labels. + x = "[+1,+1]"; + rbfcalc(model, x, y); + printf("%s\n", y.tostring(2).c_str()); // EXPECTED: [0.000,0.000] -COMMERCIAL EDITION OF ALGLIB: + // + // Step 2: we add dataset. + // + // XY arrays containt four points: + // * (x0,y0) = (+1,+1), f(x0,y0)=(0,-1) + // * (x1,y1) = (+1,-1), f(x1,y1)=(-1,0) + // * (x2,y2) = (-1,-1), f(x2,y2)=(0,+1) + // * (x3,y3) = (-1,+1), f(x3,y3)=(+1,0) + // + real_2d_array xy = "[[+1,+1,0,-1],[+1,-1,-1,0],[-1,-1,0,+1],[-1,+1,+1,0]]"; + rbfsetpoints(model, xy); - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. Best results are achieved for high-dimensional problems - ! (NVars is at least 256). - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. + // We added points, but model was not rebuild yet. + // If we call rbfcalc(), we still will get 0.0 as result. + rbfcalc(model, x, y); + printf("%s\n", y.tostring(2).c_str()); // EXPECTED: [0.000,0.000] -INPUT PARAMETERS: - X - dataset, array[0..NPoints-1,0..NVars-1]. - matrix contains ONLY INDEPENDENT VARIABLES. - NPoints - dataset size, NPoints>=0 - NVars - number of independent variables, NVars>=1 + // + // Step 3: rebuild model + // + // We use hierarchical RBF algorithm with following parameters: + // * RBase - set to 1.0 + // * NLayers - three layers are used (although such simple problem + // does not need more than 1 layer) + // * LambdaReg - is set to zero value, no smoothing is required + // + // After we've configured model, we should rebuild it - + // it will change coefficients stored internally in the + // rbfmodel structure. + // + rbfreport rep; + rbfsetalgohierarchical(model, 1.0, 3, 0.0); + rbfbuildmodel(model, rep); + printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 -OUTPUT PARAMETERS: - Info - return code: - * -4, if SVD subroutine haven't converged - * -1, if wrong parameters has been passed (NPoints<0, - NVars<1) - * 1, if task is solved - S2 - array[0..NVars-1]. variance values corresponding - to basis vectors. - V - array[0..NVars-1,0..NVars-1] - matrix, whose columns store basis vectors. + // + // Step 4: model was built + // + // After call of rbfbuildmodel(), rbfcalc() will return + // value of the new model. + // + rbfcalc(model, x, y); + printf("%s\n", y.tostring(2).c_str()); // EXPECTED: [0.000,-1.000] + return 0; +} - -- ALGLIB -- - Copyright 25.08.2008 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::pcabuildbasis( - real_2d_array x, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t& info, - real_1d_array& s2, - real_2d_array& v); -
    - + - +
     
    /************************************************************************* -Inverse Poisson distribution - -Finds the Poisson variable x such that the integral -from 0 to x of the Poisson density is equal to the -given probability y. - -This is accomplished using the inverse gamma integral -function and the relation +Estimate of the condition number of a matrix given by its LU decomposition (1-norm) - m = igami( k+1, y ). +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -ACCURACY: +Input parameters: + LUA - LU decomposition of a matrix in compact form. Output of + the CMatrixLU subroutine. + N - size of matrix A. -See inverse incomplete gamma function +Result: 1/LowerBound(cond(A)) -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    double alglib::invpoissondistribution(ae_int_t k, double y); +
    double alglib::cmatrixlurcond1( + complex_2d_array lua, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Complemented Poisson distribution - -Returns the sum of the terms k+1 to infinity of the Poisson -distribution: - - inf. j - -- -m m - > e -- - -- j! - j=k+1 - -The terms are not summed directly; instead the incomplete -gamma integral is employed, according to the formula - -y = pdtrc( k, m ) = igam( k+1, m ). +Estimate of the condition number of a matrix given by its LU decomposition +(infinity norm). -The arguments must both be positive. +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -ACCURACY: +Input parameters: + LUA - LU decomposition of a matrix in compact form. Output of + the CMatrixLU subroutine. + N - size of matrix A. -See incomplete gamma function +Result: 1/LowerBound(cond(A)) -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    double alglib::poissoncdistribution(ae_int_t k, double m); +
    double alglib::cmatrixlurcondinf( + complex_2d_array lua, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Poisson distribution - -Returns the sum of the first k+1 terms of the Poisson -distribution: - - k j - -- -m m - > e -- - -- j! - j=0 - -The terms are not summed directly; instead the incomplete -gamma integral is employed, according to the relation +Estimate of a matrix condition number (1-norm) -y = pdtr( k, m ) = igamc( k+1, m ). +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -The arguments must both be positive. -ACCURACY: +Input parameters: + A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. -See incomplete gamma function +Result: 1/LowerBound(cond(A)) -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    double alglib::poissondistribution(ae_int_t k, double m); +
    double alglib::cmatrixrcond1( + complex_2d_array a, + ae_int_t n, + const xparams _params = alglib::xdefault);
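A minimal usage sketch for cmatrixrcond1(), not taken from the manual's example set; it assumes the usual ALGLIB example boilerplate (stdafx.h, linalg.h) and uses a real-valued diagonal test matrix for simplicity. The commented value is a rough hand estimate.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Diagonal matrix diag(2,1): cond_1(A)=2, so the reciprocal estimate
    // should come out close to 0.5.
    complex_2d_array a = "[[2,0],[0,1]]";
    double r = cmatrixrcond1(a, 2);
    printf("%.2f\n", r); // roughly 0.50
    return 0;
}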
    - -
    - -polynomialbar2cheb
    -polynomialbar2pow
    -polynomialbuild
    -polynomialbuildcheb1
    -polynomialbuildcheb2
    -polynomialbuildeqdist
    -polynomialcalccheb1
    -polynomialcalccheb2
    -polynomialcalceqdist
    -polynomialcheb2bar
    -polynomialpow2bar
    - - - - - -
    polint_d_calcdiff Interpolation and differentiation using barycentric representation
    polint_d_conv Conversion between power basis and barycentric representation
    polint_d_spec Polynomial interpolation on special grids (equidistant, Chebyshev I/II)
    - +
     
    /************************************************************************* -Conversion from barycentric representation to Chebyshev basis. -This function has O(N^2) complexity. +Estimate of a matrix condition number (infinity-norm). -INPUT PARAMETERS: - P - polynomial in barycentric form - A,B - base interval for Chebyshev polynomials (see below) - A<>B +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -OUTPUT PARAMETERS - T - coefficients of Chebyshev representation; - P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N-1 }, - where Ti - I-th Chebyshev polynomial. +Input parameters: + A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. -NOTES: - barycentric interpolant passed as P may be either polynomial obtained - from polynomial interpolation/ fitting or rational function which is - NOT polynomial. We can't distinguish between these two cases, and this - algorithm just tries to work assuming that P IS a polynomial. If not, - algorithm will return results, but they won't have any meaning. +Result: 1/LowerBound(cond(A)) - -- ALGLIB -- - Copyright 30.09.2010 by Bochkanov Sergey +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    void alglib::polynomialbar2cheb( - barycentricinterpolant p, - double a, - double b, - real_1d_array& t); +
    double alglib::cmatrixrcondinf( + complex_2d_array a, + ae_int_t n, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Conversion from barycentric representation to power basis. -This function has O(N^2) complexity. - -INPUT PARAMETERS: - P - polynomial in barycentric form - C - offset (see below); 0.0 is used as default value. - S - scale (see below); 1.0 is used as default value. S<>0. - -OUTPUT PARAMETERS - A - coefficients, P(x) = sum { A[i]*((X-C)/S)^i, i=0..N-1 } - N - number of coefficients (polynomial degree plus 1) - -NOTES: -1. this function accepts offset and scale, which can be set to improve - numerical properties of polynomial. For example, if P was obtained as - result of interpolation on [-1,+1], you can set C=0 and S=1 and - represent P as sum of 1, x, x^2, x^3 and so on. In most cases you it - is exactly what you need. +Triangular matrix: estimate of a condition number (1-norm) - However, if your interpolation model was built on [999,1001], you will - see significant growth of numerical errors when using {1, x, x^2, x^3} - as basis. Representing P as sum of 1, (x-1000), (x-1000)^2, (x-1000)^3 - will be better option. Such representation can be obtained by using - 1000.0 as offset C and 1.0 as scale S. +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -2. power basis is ill-conditioned and tricks described above can't solve - this problem completely. This function will return coefficients in - any case, but for N>8 they will become unreliable. However, N's - less than 5 are pretty safe. +Input parameters: + A - matrix. Array[0..N-1, 0..N-1]. + N - size of A. + IsUpper - True, if the matrix is upper triangular. + IsUnit - True, if the matrix has a unit diagonal. -3. barycentric interpolant passed as P may be either polynomial obtained - from polynomial interpolation/ fitting or rational function which is - NOT polynomial. We can't distinguish between these two cases, and this - algorithm just tries to work assuming that P IS a polynomial. If not, - algorithm will return results, but they won't have any meaning. +Result: 1/LowerBound(cond(A)) - -- ALGLIB -- - Copyright 30.09.2010 by Bochkanov Sergey +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    void alglib::polynomialbar2pow( - barycentricinterpolant p, - real_1d_array& a); -void alglib::polynomialbar2pow( - barycentricinterpolant p, - double c, - double s, - real_1d_array& a); +
    double alglib::cmatrixtrrcond1( + complex_2d_array a, + ae_int_t n, + bool isupper, + bool isunit, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Lagrange intepolant: generation of the model on the general grid. -This function has O(N^2) complexity. +Triangular matrix: estimate of a matrix condition number (infinity-norm). -INPUT PARAMETERS: - X - abscissas, array[0..N-1] - Y - function values, array[0..N-1] - N - number of points, N>=1 +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -OUTPUT PARAMETERS - P - barycentric model which represents Lagrange interpolant - (see ratint unit info and BarycentricCalc() description for - more information). +Input parameters: + A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + IsUpper - True, if the matrix is upper triangular. + IsUnit - True, if the matrix has a unit diagonal. - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey +Result: 1/LowerBound(cond(A)) + +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    void alglib::polynomialbuild( - real_1d_array x, - real_1d_array y, - barycentricinterpolant& p); -void alglib::polynomialbuild( - real_1d_array x, - real_1d_array y, +
    double alglib::cmatrixtrrcondinf( + complex_2d_array a, ae_int_t n, - barycentricinterpolant& p); + bool isupper, + bool isunit, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Lagrange intepolant on Chebyshev grid (first kind). -This function has O(N) complexity. +Condition number estimate of a Hermitian positive definite matrix given by +Cholesky decomposition. -INPUT PARAMETERS: - A - left boundary of [A,B] - B - right boundary of [A,B] - Y - function values at the nodes, array[0..N-1], - Y[I] = Y(0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n))) - N - number of points, N>=1 - for N=1 a constant model is constructed. +The algorithm calculates a lower bound of the condition number. In this +case, the algorithm does not return a lower bound of the condition number, +but an inverse number (to avoid an overflow in case of a singular matrix). -OUTPUT PARAMETERS - P - barycentric model which represents Lagrange interpolant - (see ratint unit info and BarycentricCalc() description for - more information). +It should be noted that 1-norm and inf-norm condition numbers of symmetric +matrices are equal, so the algorithm doesn't take into account the +differences between these types of norms. - -- ALGLIB -- - Copyright 03.12.2009 by Bochkanov Sergey +Input parameters: + CD - Cholesky decomposition of matrix A, + output of SMatrixCholesky subroutine. + N - size of matrix A. + +Result: 1/LowerBound(cond(A)) + +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    void alglib::polynomialbuildcheb1( - double a, - double b, - real_1d_array y, - barycentricinterpolant& p); -void alglib::polynomialbuildcheb1( - double a, - double b, - real_1d_array y, +
    double alglib::hpdmatrixcholeskyrcond( + complex_2d_array a, ae_int_t n, - barycentricinterpolant& p); + bool isupper, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Lagrange intepolant on Chebyshev grid (second kind). -This function has O(N) complexity. +Condition number estimate of a Hermitian positive definite matrix. -INPUT PARAMETERS: - A - left boundary of [A,B] - B - right boundary of [A,B] - Y - function values at the nodes, array[0..N-1], - Y[I] = Y(0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1))) - N - number of points, N>=1 - for N=1 a constant model is constructed. +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -OUTPUT PARAMETERS - P - barycentric model which represents Lagrange interpolant - (see ratint unit info and BarycentricCalc() description for - more information). +It should be noted that 1-norm and inf-norm of condition numbers of symmetric +matrices are equal, so the algorithm doesn't take into account the +differences between these types of norms. - -- ALGLIB -- - Copyright 03.12.2009 by Bochkanov Sergey +Input parameters: + A - Hermitian positive definite matrix which is given by its + upper or lower triangle depending on the value of + IsUpper. Array with elements [0..N-1, 0..N-1]. + N - size of matrix A. + IsUpper - storage format. + +Result: + 1/LowerBound(cond(A)), if matrix A is positive definite, + -1, if matrix A is not positive definite, and its condition number + could not be found by this algorithm. + +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    void alglib::polynomialbuildcheb2( - double a, - double b, - real_1d_array y, - barycentricinterpolant& p); -void alglib::polynomialbuildcheb2( - double a, - double b, - real_1d_array y, +
    double alglib::hpdmatrixrcond( + complex_2d_array a, ae_int_t n, - barycentricinterpolant& p); + bool isupper, + const xparams _params = alglib::xdefault);
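A short sketch for hpdmatrixrcond(), again assuming the stdafx.h/linalg.h boilerplate; a real symmetric positive definite matrix is used so that it also serves as a Hermitian one, and the -1 result for a non-PD input is only noted in a comment.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Hermitian (here real symmetric) positive definite matrix,
    // given by its upper triangle (IsUpper=true).
    complex_2d_array a = "[[3,1],[1,2]]";
    double r = hpdmatrixrcond(a, 2, true);
    printf("%.3f\n", r); // positive for a PD matrix; -1 would mean "not PD"
    return 0;
}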
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Lagrange intepolant: generation of the model on equidistant grid. -This function has O(N) complexity. +Estimate of the condition number of a matrix given by its LU decomposition (1-norm) -INPUT PARAMETERS: - A - left boundary of [A,B] - B - right boundary of [A,B] - Y - function values at the nodes, array[0..N-1] - N - number of points, N>=1 - for N=1 a constant model is constructed. +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -OUTPUT PARAMETERS - P - barycentric model which represents Lagrange interpolant - (see ratint unit info and BarycentricCalc() description for - more information). +Input parameters: + LUA - LU decomposition of a matrix in compact form. Output of + the RMatrixLU subroutine. + N - size of matrix A. - -- ALGLIB -- - Copyright 03.12.2009 by Bochkanov Sergey +Result: 1/LowerBound(cond(A)) + +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    void alglib::polynomialbuildeqdist( - double a, - double b, - real_1d_array y, - barycentricinterpolant& p); -void alglib::polynomialbuildeqdist( - double a, - double b, - real_1d_array y, +
    double alglib::rmatrixlurcond1( + real_2d_array lua, ae_int_t n, - barycentricinterpolant& p); + const xparams _params = alglib::xdefault);
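A sketch of the intended workflow for rmatrixlurcond1(): factor first with rmatrixlu() (assumed to be available from the same linalg package), then estimate from the compact LU form. It assumes the stdafx.h/linalg.h boilerplate; the commented value is a rough hand estimate.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    real_2d_array a = "[[4,2],[1,3]]";
    integer_1d_array pivots;

    // In-place LU factorization; A is overwritten with the compact LU form
    // expected by rmatrixlurcond1().
    rmatrixlu(a, 2, 2, pivots);

    double r = rmatrixlurcond1(a, 2);
    printf("%.3f\n", r); // roughly 0.333 (cond_1(A)=3 for this matrix)
    return 0;
}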
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Fast polynomial interpolation function on Chebyshev points (first kind) -with O(N) complexity. +Estimate of the condition number of a matrix given by its LU decomposition +(infinity norm). -INPUT PARAMETERS: - A - left boundary of [A,B] - B - right boundary of [A,B] - F - function values, array[0..N-1] - N - number of points on Chebyshev grid (first kind), - X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*(2*i+1)/(2*n)) - for N=1 a constant model is constructed. - T - position where P(x) is calculated +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -RESULT - value of the Lagrange interpolant at T +Input parameters: + LUA - LU decomposition of a matrix in compact form. Output of + the RMatrixLU subroutine. + N - size of matrix A. -IMPORTANT - this function provides fast interface which is not overflow-safe - nor it is very precise. - the best option is to use PolIntBuildCheb1()/BarycentricCalc() - subroutines unless you are pretty sure that your data will not result - in overflow. +Result: 1/LowerBound(cond(A)) - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    double alglib::polynomialcalccheb1( - double a, - double b, - real_1d_array f, - double t); -double alglib::polynomialcalccheb1( - double a, - double b, - real_1d_array f, +
    double alglib::rmatrixlurcondinf( + real_2d_array lua, ae_int_t n, - double t); + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Fast polynomial interpolation function on Chebyshev points (second kind) -with O(N) complexity. +Estimate of a matrix condition number (1-norm) -INPUT PARAMETERS: - A - left boundary of [A,B] - B - right boundary of [A,B] - F - function values, array[0..N-1] - N - number of points on Chebyshev grid (second kind), - X[i] = 0.5*(B+A) + 0.5*(B-A)*Cos(PI*i/(n-1)) - for N=1 a constant model is constructed. - T - position where P(x) is calculated +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -RESULT - value of the Lagrange interpolant at T +Input parameters: + A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. -IMPORTANT - this function provides fast interface which is not overflow-safe - nor it is very precise. - the best option is to use PolIntBuildCheb2()/BarycentricCalc() - subroutines unless you are pretty sure that your data will not result - in overflow. +Result: 1/LowerBound(cond(A)) - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    double alglib::polynomialcalccheb2( - double a, - double b, - real_1d_array f, - double t); -double alglib::polynomialcalccheb2( - double a, - double b, - real_1d_array f, +
    double alglib::rmatrixrcond1( + real_2d_array a, ae_int_t n, - double t); + const xparams _params = alglib::xdefault);
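A sketch showing how near-singularity shows up in rmatrixrcond1(): the reciprocal estimate collapses towards zero. It assumes the stdafx.h/linalg.h boilerplate; the order of magnitude in the comment is a hand estimate.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Nearly singular matrix: the rows differ only by 1e-6 in one entry.
    real_2d_array a = "[[1,1],[1,1.000001]]";
    double r = rmatrixrcond1(a, 2);
    printf("%.2e\n", r); // tiny value, on the order of 1e-7
    return 0;
}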
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Fast equidistant polynomial interpolation function with O(N) complexity +Estimate of a matrix condition number (infinity-norm). -INPUT PARAMETERS: - A - left boundary of [A,B] - B - right boundary of [A,B] - F - function values, array[0..N-1] - N - number of points on equidistant grid, N>=1 - for N=1 a constant model is constructed. - T - position where P(x) is calculated +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -RESULT - value of the Lagrange interpolant at T +Input parameters: + A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. -IMPORTANT - this function provides fast interface which is not overflow-safe - nor it is very precise. - the best option is to use PolynomialBuildEqDist()/BarycentricCalc() - subroutines unless you are pretty sure that your data will not result - in overflow. +Result: 1/LowerBound(cond(A)) - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    double alglib::polynomialcalceqdist( - double a, - double b, - real_1d_array f, - double t); -double alglib::polynomialcalceqdist( - double a, - double b, - real_1d_array f, +
    double alglib::rmatrixrcondinf( + real_2d_array a, ae_int_t n, - double t); + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -Conversion from Chebyshev basis to barycentric representation. -This function has O(N^2) complexity. +Triangular matrix: estimate of a condition number (1-norm) -INPUT PARAMETERS: - T - coefficients of Chebyshev representation; - P(x) = sum { T[i]*Ti(2*(x-A)/(B-A)-1), i=0..N }, - where Ti - I-th Chebyshev polynomial. - N - number of coefficients: - * if given, only leading N elements of T are used - * if not given, automatically determined from size of T - A,B - base interval for Chebyshev polynomials (see above) - A<B +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). -OUTPUT PARAMETERS - P - polynomial in barycentric form +Input parameters: + A - matrix. Array[0..N-1, 0..N-1]. + N - size of A. + IsUpper - True, if the matrix is upper triangular. + IsUnit - True, if the matrix has a unit diagonal. - -- ALGLIB -- - Copyright 30.09.2010 by Bochkanov Sergey +Result: 1/LowerBound(cond(A)) + +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    void alglib::polynomialcheb2bar( - real_1d_array t, - double a, - double b, - barycentricinterpolant& p); -void alglib::polynomialcheb2bar( - real_1d_array t, +
    double alglib::rmatrixtrrcond1( + real_2d_array a, ae_int_t n, - double a, - double b, - barycentricinterpolant& p); + bool isupper, + bool isunit, + const xparams _params = alglib::xdefault);
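A sketch for rmatrixtrrcond1() on an explicit upper triangular matrix with a non-unit diagonal, assuming the stdafx.h/linalg.h boilerplate; the commented value is a hand estimate.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Upper triangular, non-unit diagonal: IsUpper=true, IsUnit=false.
    real_2d_array a = "[[2,1],[0,1]]";
    double r = rmatrixtrrcond1(a, 2, true, false);
    printf("%.3f\n", r); // roughly 0.333 (cond_1(A)=3)
    return 0;
}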
    - +
     
    /************************************************************************* -Conversion from power basis to barycentric representation. -This function has O(N^2) complexity. - -INPUT PARAMETERS: - A - coefficients, P(x) = sum { A[i]*((X-C)/S)^i, i=0..N-1 } - N - number of coefficients (polynomial degree plus 1) - * if given, only leading N elements of A are used - * if not given, automatically determined from size of A - C - offset (see below); 0.0 is used as default value. - S - scale (see below); 1.0 is used as default value. S<>0. - -OUTPUT PARAMETERS - P - polynomial in barycentric form - +Triangular matrix: estimate of a matrix condition number (infinity-norm). -NOTES: -1. this function accepts offset and scale, which can be set to improve - numerical properties of polynomial. For example, if you interpolate on - [-1,+1], you can set C=0 and S=1 and convert from sum of 1, x, x^2, - x^3 and so on. In most cases you it is exactly what you need. +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). - However, if your interpolation model was built on [999,1001], you will - see significant growth of numerical errors when using {1, x, x^2, x^3} - as input basis. Converting from sum of 1, (x-1000), (x-1000)^2, - (x-1000)^3 will be better option (you have to specify 1000.0 as offset - C and 1.0 as scale S). +Input parameters: + A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + IsUpper - True, if the matrix is upper triangular. + IsUnit - True, if the matrix has a unit diagonal. -2. power basis is ill-conditioned and tricks described above can't solve - this problem completely. This function will return barycentric model - in any case, but for N>8 accuracy well degrade. However, N's less than - 5 are pretty safe. +Result: 1/LowerBound(cond(A)) - -- ALGLIB -- - Copyright 30.09.2010 by Bochkanov Sergey +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. *************************************************************************/ -
    void alglib::polynomialpow2bar( - real_1d_array a, - barycentricinterpolant& p); -void alglib::polynomialpow2bar( - real_1d_array a, +
    double alglib::rmatrixtrrcondinf( + real_2d_array a, ae_int_t n, - double c, - double s, - barycentricinterpolant& p); + bool isupper, + bool isunit, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // Here we demonstrate polynomial interpolation and differentiation
    -    // of y=x^2-x sampled at [0,1,2]. Barycentric representation of polynomial is used.
    -    //
    -    real_1d_array x = "[0,1,2]";
    -    real_1d_array y = "[0,0,2]";
    -    double t = -1;
    -    double v;
    -    double dv;
    -    double d2v;
    -    barycentricinterpolant p;
    +
    /************************************************************************* +Condition number estimate of a symmetric positive definite matrix given by +Cholesky decomposition. - // barycentric model is created - polynomialbuild(x, y, p); +The algorithm calculates a lower bound of the condition number. In this +case, the algorithm does not return a lower bound of the condition number, +but an inverse number (to avoid an overflow in case of a singular matrix). - // barycentric interpolation is demonstrated - v = barycentriccalc(p, t); - printf("%.4f\n", double(v)); // EXPECTED: 2.0 +It should be noted that 1-norm and inf-norm condition numbers of symmetric +matrices are equal, so the algorithm doesn't take into account the +differences between these types of norms. - // barycentric differentation is demonstrated - barycentricdiff1(p, t, v, dv); - printf("%.4f\n", double(v)); // EXPECTED: 2.0 - printf("%.4f\n", double(dv)); // EXPECTED: -3.0 +Input parameters: + CD - Cholesky decomposition of matrix A, + output of SMatrixCholesky subroutine. + N - size of matrix A. - // second derivatives with barycentric representation - barycentricdiff1(p, t, v, dv); - printf("%.4f\n", double(v)); // EXPECTED: 2.0 - printf("%.4f\n", double(dv)); // EXPECTED: -3.0 - barycentricdiff2(p, t, v, dv, d2v); - printf("%.4f\n", double(v)); // EXPECTED: 2.0 - printf("%.4f\n", double(dv)); // EXPECTED: -3.0 - printf("%.4f\n", double(d2v)); // EXPECTED: 2.0 - return 0; -} +Result: 1/LowerBound(cond(A)) +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. +*************************************************************************/ +
    double alglib::spdmatrixcholeskyrcond( + real_2d_array a, + ae_int_t n, + bool isupper, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    -
    +
    /************************************************************************* +Condition number estimate of a symmetric positive definite matrix. -int main(int argc, char **argv) -{ - // - // Here we demonstrate conversion of y=x^2-x - // between power basis and barycentric representation. - // - real_1d_array a = "[0,-1,+1]"; - double t = 2; - real_1d_array a2; - double v; - barycentricinterpolant p; +The algorithm calculates a lower bound of the condition number. In this case, +the algorithm does not return a lower bound of the condition number, but an +inverse number (to avoid an overflow in case of a singular matrix). - // - // a=[0,-1,+1] is decomposition of y=x^2-x in the power basis: - // - // y = 0 - 1*x + 1*x^2 - // - // We convert it to the barycentric form. - // - polynomialpow2bar(a, p); +It should be noted that 1-norm and inf-norm of condition numbers of symmetric +matrices are equal, so the algorithm doesn't take into account the +differences between these types of norms. - // now we have barycentric interpolation; we can use it for interpolation - v = barycentriccalc(p, t); - printf("%.2f\n", double(v)); // EXPECTED: 2.0 +Input parameters: + A - symmetric positive definite matrix which is given by its + upper or lower triangle depending on the value of + IsUpper. Array with elements [0..N-1, 0..N-1]. + N - size of matrix A. + IsUpper - storage format. - // we can also convert back from barycentric representation to power basis - polynomialbar2pow(p, a2); - printf("%s\n", a2.tostring(2).c_str()); // EXPECTED: [0,-1,+1] - return 0; -} +Result: + 1/LowerBound(cond(A)), if matrix A is positive definite, + -1, if matrix A is not positive definite, and its condition number + could not be found by this algorithm. +NOTE: + if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, + 0.0 is returned in such cases. +*************************************************************************/ +
    double alglib::spdmatrixrcond( + real_2d_array a, + ae_int_t n, + bool isupper, + const xparams _params = alglib::xdefault); -
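A sketch relating spdmatrixrcond() to the Cholesky-based spdmatrixcholeskyrcond() documented above: for the same SPD matrix the two estimates should essentially agree. It assumes the stdafx.h/linalg.h boilerplate, and spdmatrixcholesky() is assumed to be available from the same linalg package.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // SPD matrix given by its upper triangle.
    real_2d_array a = "[[4,1],[1,3]]";
    double r1 = spdmatrixrcond(a, 2, true);

    // Same matrix, factored first; the Cholesky factor is then passed
    // to the factorization-based estimator.
    real_2d_array c = "[[4,1],[1,3]]";
    bool ok = spdmatrixcholesky(c, 2, true);
    double r2 = ok ? spdmatrixcholeskyrcond(c, 2, true) : -1.0;

    printf("%.3f %.3f\n", r1, r2); // the two estimates should agree closely
    return 0;
}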
    + + +
    + +rmatrixschur
    + + +
    +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    +
    /************************************************************************* +Subroutine performing the Schur decomposition of a general matrix by using +the QR algorithm with multiple shifts. +COMMERCIAL EDITION OF ALGLIB: -int main(int argc, char **argv) -{ - // - // Temporaries: - // * values of y=x^2-x sampled at three special grids: - // * equdistant grid spanning [0,2], x[i] = 2*i/(N-1), i=0..N-1 - // * Chebyshev-I grid spanning [-1,+1], x[i] = 1 + Cos(PI*(2*i+1)/(2*n)), i=0..N-1 - // * Chebyshev-II grid spanning [-1,+1], x[i] = 1 + Cos(PI*i/(n-1)), i=0..N-1 - // * barycentric interpolants for these three grids - // * vectors to store coefficients of quadratic representation - // - real_1d_array y_eqdist = "[0,0,2]"; - real_1d_array y_cheb1 = "[-0.116025,0.000000,1.616025]"; - real_1d_array y_cheb2 = "[0,0,2]"; - barycentricinterpolant p_eqdist; - barycentricinterpolant p_cheb1; - barycentricinterpolant p_cheb2; - real_1d_array a_eqdist; - real_1d_array a_cheb1; - real_1d_array a_cheb2; + ! Commercial version of ALGLIB includes one important improvement of + ! this function, which can be used from C++ and C#: + ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) + ! + ! Intel MKL gives approximately constant (with respect to number of + ! worker threads) acceleration factor which depends on CPU being used, + ! problem size and "baseline" ALGLIB edition which is used for + ! comparison. + ! + ! Multithreaded acceleration is NOT supported for this function. + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - // - // First, we demonstrate construction of barycentric interpolants on - // special grids. We unpack power representation to ensure that - // interpolant was built correctly. - // - // In all three cases we should get same quadratic function. - // - polynomialbuildeqdist(0.0, 2.0, y_eqdist, p_eqdist); - polynomialbar2pow(p_eqdist, a_eqdist); - printf("%s\n", a_eqdist.tostring(4).c_str()); // EXPECTED: [0,-1,+1] +The source matrix A is represented as S'*A*S = T, where S is an orthogonal +matrix (Schur vectors), T - upper quasi-triangular matrix (with blocks of +sizes 1x1 and 2x2 on the main diagonal). - polynomialbuildcheb1(-1, +1, y_cheb1, p_cheb1); - polynomialbar2pow(p_cheb1, a_cheb1); - printf("%s\n", a_cheb1.tostring(4).c_str()); // EXPECTED: [0,-1,+1] +Input parameters: + A - matrix to be decomposed. + Array whose indexes range within [0..N-1, 0..N-1]. + N - size of A, N>=0. - polynomialbuildcheb2(-1, +1, y_cheb2, p_cheb2); - polynomialbar2pow(p_cheb2, a_cheb2); - printf("%s\n", a_cheb2.tostring(4).c_str()); // EXPECTED: [0,-1,+1] - // - // Now we demonstrate polynomial interpolation without construction - // of the barycentricinterpolant structure. - // - // We calculate interpolant value at x=-2. - // In all three cases we should get same f=6 - // - double t = -2; - double v; - v = polynomialcalceqdist(0.0, 2.0, y_eqdist, t); - printf("%.4f\n", double(v)); // EXPECTED: 6.0 +Output parameters: + A - contains matrix T. + Array whose indexes range within [0..N-1, 0..N-1]. + S - contains Schur vectors. + Array whose indexes range within [0..N-1, 0..N-1]. 
- v = polynomialcalccheb1(-1, +1, y_cheb1, t); - printf("%.4f\n", double(v)); // EXPECTED: 6.0 +Note 1: + The block structure of matrix T can be easily recognized: since all + the elements below the blocks are zeros, the elements a[i+1,i] which + are equal to 0 show the block border. - v = polynomialcalccheb2(-1, +1, y_cheb2, t); - printf("%.4f\n", double(v)); // EXPECTED: 6.0 - return 0; -} +Note 2: + The algorithm performance depends on the value of the internal parameter + NS of the InternalSchurDecomposition subroutine which defines the number + of shifts in the QR algorithm (similarly to the block width in block-matrix + algorithms in linear algebra). If you require maximum performance on + your machine, it is recommended to adjust this parameter manually. + +Result: + True, + if the algorithm has converged and parameters A and S contain the result. + False, + if the algorithm has not converged. +Algorithm implemented on the basis of the DHSEQR subroutine (LAPACK 3.0 library). +*************************************************************************/ +
    bool alglib::rmatrixschur( + real_2d_array& a, + ae_int_t n, + real_2d_array& s, + const xparams _params = alglib::xdefault); -
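A minimal sketch for rmatrixschur(), assuming the stdafx.h/linalg.h boilerplate; the 2x2 input has a complex-conjugate eigenvalue pair, so the factor T comes back as a single 2x2 block.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Rotation-like matrix with eigenvalues +i and -i.
    real_2d_array a = "[[0,-1],[1,0]]";
    real_2d_array s;

    bool converged = rmatrixschur(a, 2, s);
    printf("%s\n", converged ? "converged" : "not converged");
    printf("%s\n", a.tostring(3).c_str()); // quasi-triangular factor T = S'*A*S
    printf("%s\n", s.tostring(3).c_str()); // orthogonal Schur vectors S
    return 0;
}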
    + + - +
     
/************************************************************************* +Temporary buffers for sparse matrix operations. +You should pass an instance of this structure to factorization functions. +It allows memory to be reused during repeated sparse factorizations. You do +not have to call any initialization function - simply passing an instance +to a factorization function is enough. *************************************************************************/ -
    class polynomialsolverreport +
    class sparsebuffers { - double maxerr; };
    - +
     
    /************************************************************************* -Polynomial root finding. +Sparse matrix structure. -This function returns all roots of the polynomial - P(x) = a0 + a1*x + a2*x^2 + ... + an*x^n -Both real and complex roots are returned (see below). +You should use ALGLIB functions to work with sparse matrix. Never try to +access its fields directly! -INPUT PARAMETERS: - A - array[N+1], polynomial coefficients: - * A[0] is constant term - * A[N] is a coefficient of X^N - N - polynomial degree +NOTES ON THE SPARSE STORAGE FORMATS -OUTPUT PARAMETERS: - X - array of complex roots: - * for isolated real root, X[I] is strictly real: IMAGE(X[I])=0 - * complex roots are always returned in pairs - roots occupy - positions I and I+1, with: - * X[I+1]=Conj(X[I]) - * IMAGE(X[I]) > 0 - * IMAGE(X[I+1]) = -IMAGE(X[I]) < 0 - * multiple real roots may have non-zero imaginary part due - to roundoff errors. There is no reliable way to distinguish - real root of multiplicity 2 from two complex roots in - the presence of roundoff errors. - Rep - report, additional information, following fields are set: - * Rep.MaxErr - max( |P(xi)| ) for i=0..N-1. This field - allows to quickly estimate "quality" of the roots being - returned. +Sparse matrices can be stored using several formats: +* Hash-Table representation +* Compressed Row Storage (CRS) +* Skyline matrix storage (SKS) -NOTE: this function uses companion matrix method to find roots. In case - internal EVD solver fails do find eigenvalues, exception is - generated. +Each of the formats has benefits and drawbacks: +* Hash-table is good for dynamic operations (insertion of new elements), + but does not support linear algebra operations +* CRS is good for operations like matrix-vector or matrix-matrix products, + but its initialization is less convenient - you have to tell row sizes + at the initialization, and you have to fill matrix only row by row, + from left to right. +* SKS is a special format which is used to store triangular factors from + Cholesky factorization. It does not support dynamic modification, and + support for linear algebra operations is very limited. -NOTE: roots are not "polished" and no matrix balancing is performed - for them. +Tables below outline information about these two formats: - -- ALGLIB -- - Copyright 24.02.2014 by Bochkanov Sergey + OPERATIONS WITH MATRIX HASH CRS SKS + creation + + + + SparseGet + + + + SparseRewriteExisting + + + + SparseSet + + + + SparseAdd + + SparseGetRow + + + SparseGetCompressedRow + + + sparse-dense linear algebra + + *************************************************************************/ -
    void alglib::polynomialsolve( - real_1d_array a, - ae_int_t n, - complex_1d_array& x, - polynomialsolverreport& rep); +
    class sparsematrix +{ +};
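A sketch of the typical lifecycle implied by the table above: build the matrix dynamically in the hash-table format, then convert to CRS before doing sparse-dense linear algebra. It assumes the stdafx.h/linalg.h boilerplate; sparsecreate(), sparseset() and sparsemv() are assumed from the standard sparse unit, and the printed vector is a rough expectation, not manual output.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Hash-table representation: convenient for element-by-element filling.
    sparsematrix s;
    sparsecreate(2, 2, s);
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 1, 1, 3.0);

    // CRS supports operations like matrix-vector products.
    sparseconverttocrs(s);

    real_1d_array x = "[1,1]";
    real_1d_array y;
    sparsemv(s, x, y);
    printf("%s\n", y.tostring(1).c_str()); // roughly [2.0,3.0]
    return 0;
}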
    - -
    - -psi
    - - -
    - +
     
/************************************************************************* -Psi (digamma) function - - d - - psi(x) = -- ln | (x) - dx - -is the logarithmic derivative of the gamma function. -For integer x, - n-1 - - -psi(n) = -EUL + > 1/k. - - - k=1 +This function adds value to S[i,j] - element of the sparse matrix. Matrix +must be in a Hash-Table mode. -This formula is used for 0 < n <= 10. If x is negative, it -is transformed to a positive argument by the reflection -formula psi(1-x) = psi(x) + pi cot(pi x). -For general positive x, the argument is made greater than 10 -using the recurrence psi(x+1) = psi(x) + 1/x. -Then the following asymptotic expansion is applied: +In case S[i,j] already exists in the table, V is added to its value. In +case S[i,j] is non-existent, it is inserted in the table. Table +automatically grows when necessary. - inf. B - - 2k -psi(x) = log(x) - 1/2x - > ------- - - 2k - k=1 2k x +INPUT PARAMETERS + S - sparse M*N matrix in Hash-Table representation. + Exception will be thrown for CRS matrix. + I - row index of the element to modify, 0<=I<M + J - column index of the element to modify, 0<=J<N + V - value to add, must be finite number +OUTPUT PARAMETERS + S - modified matrix -where the B2k are Bernoulli numbers. -ACCURACY: - Relative error (except absolute when |psi| < 1): -arithmetic domain # trials peak rms - IEEE 0,30 30000 1.3e-15 1.4e-16 - IEEE -30,0 40000 1.5e-15 2.2e-16 +NOTE 1: when S[i,j] is exactly zero after modification, it is deleted +from the table. -Cephes Math Library Release 2.8: June, 2000 -Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::psi(double x); +
    void alglib::sparseadd( + sparsematrix s, + ae_int_t i, + ae_int_t j, + double v, + const xparams _params = alglib::xdefault);
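A sketch of the accumulation semantics described above, assuming the stdafx.h/linalg.h boilerplate; sparsecreate() and sparseget() are assumed from the same sparse unit.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    sparsematrix s;
    sparsecreate(2, 2, s);                // hash-table representation

    sparseadd(s, 0, 1, 1.5);              // inserts S[0,1]=1.5
    sparseadd(s, 0, 1, 2.5);              // accumulates: S[0,1]=4.0
    printf("%.1f\n", sparseget(s, 0, 1)); // expected 4.0

    sparseadd(s, 0, 1, -4.0);             // exact zero: element is dropped
    printf("%.1f\n", sparseget(s, 0, 1)); // expected 0.0
    return 0;
}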
    - - - +

    Examples:   [1]  

    +
     
    /************************************************************************* -Barycentric interpolant. +This function performs in-place conversion to desired sparse storage +format. + +INPUT PARAMETERS + S0 - sparse matrix in any format. + Fmt - desired storage format of the output, as returned by + SparseGetMatrixType() function: + * 0 for hash-based storage + * 1 for CRS + * 2 for SKS + +OUTPUT PARAMETERS + S0 - sparse matrix in requested format. + +NOTE: in-place conversion wastes a lot of memory which is used to store + temporaries. If you perform a lot of repeated conversions, we + recommend to use out-of-place buffered conversion functions, like + SparseCopyToBuf(), which can reuse already allocated memory. + + -- ALGLIB PROJECT -- + Copyright 16.01.2014 by Bochkanov Sergey *************************************************************************/ -
    class barycentricinterpolant -{ -}; +
    void alglib::sparseconvertto( + sparsematrix s0, + ae_int_t fmt, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Rational interpolant without poles +This function converts matrix to CRS format. -The subroutine constructs the rational interpolating function without real -poles (see 'Barycentric rational interpolation with no poles and high -rates of approximation', Michael S. Floater. and Kai Hormann, for more -information on this subject). +Some algorithms (linear algebra ones, for example) require matrices in +CRS format. This function allows to perform in-place conversion. -Input parameters: - X - interpolation nodes, array[0..N-1]. - Y - function values, array[0..N-1]. - N - number of nodes, N>0. - D - order of the interpolation scheme, 0 <= D <= N-1. - D<0 will cause an error. - D>=N it will be replaced with D=N-1. - if you don't know what D to choose, use small value about 3-5. +INPUT PARAMETERS + S - sparse M*N matrix in any format -Output parameters: - B - barycentric interpolant. +OUTPUT PARAMETERS + S - matrix in CRS format -Note: - this algorithm always succeeds and calculates the weights with close - to machine precision. +NOTE: this function has no effect when called with matrix which is + already in CRS mode. + +NOTE: this function allocates temporary memory to store a copy of the + matrix. If you perform a lot of repeated conversions, we recommend + you to use SparseCopyToCRSBuf() function, which can reuse + previously allocated memory. -- ALGLIB PROJECT -- - Copyright 17.06.2007 by Bochkanov Sergey + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::barycentricbuildfloaterhormann( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t d, - barycentricinterpolant& b); +
    void alglib::sparseconverttocrs( + sparsematrix s, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -Rational interpolant from X/Y/W arrays +This function performs in-place conversion to Hash table storage. -F(t) = SUM(i=0,n-1,w[i]*f[i]/(t-x[i])) / SUM(i=0,n-1,w[i]/(t-x[i])) +INPUT PARAMETERS + S - sparse matrix in CRS format. -INPUT PARAMETERS: - X - interpolation nodes, array[0..N-1] - F - function values, array[0..N-1] - W - barycentric weights, array[0..N-1] - N - nodes count, N>0 +OUTPUT PARAMETERS + S - sparse matrix in Hash table format. -OUTPUT PARAMETERS: - B - barycentric interpolant built from (X, Y, W) +NOTE: this function has no effect when called with matrix which is + already in Hash table mode. - -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey +NOTE: in-place conversion involves allocation of temporary arrays. If you + perform a lot of repeated in- place conversions, it may lead to + memory fragmentation. Consider using out-of-place SparseCopyToHashBuf() + function in this case. + + -- ALGLIB PROJECT -- + Copyright 20.07.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::barycentricbuildxyw( - real_1d_array x, - real_1d_array y, - real_1d_array w, - ae_int_t n, - barycentricinterpolant& b); +
    void alglib::sparseconverttohash( + sparsematrix s, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Rational interpolation using barycentric formula +This function performs in-place conversion to SKS format. -F(t) = SUM(i=0,n-1,w[i]*f[i]/(t-x[i])) / SUM(i=0,n-1,w[i]/(t-x[i])) +INPUT PARAMETERS + S - sparse matrix in any format. -Input parameters: - B - barycentric interpolant built with one of model building - subroutines. - T - interpolation point +OUTPUT PARAMETERS + S - sparse matrix in SKS format. -Result: - barycentric interpolant F(t) +NOTE: this function has no effect when called with matrix which is + already in SKS mode. - -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey +NOTE: in-place conversion involves allocation of temporary arrays. If you + perform a lot of repeated in- place conversions, it may lead to + memory fragmentation. Consider using out-of-place SparseCopyToSKSBuf() + function in this case. + + -- ALGLIB PROJECT -- + Copyright 15.01.2014 by Bochkanov Sergey *************************************************************************/ -
    double alglib::barycentriccalc(barycentricinterpolant b, double t); +
    void alglib::sparseconverttosks( + sparsematrix s, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Differentiation of barycentric interpolant: first derivative. - -Algorithm used in this subroutine is very robust and should not fail until -provided with values too close to MaxRealNumber (usually MaxRealNumber/N -or greater will overflow). +This function copies S0 to S1. +This function completely deallocates memory owned by S1 before creating a +copy of S0. If you want to reuse memory, use SparseCopyBuf. -INPUT PARAMETERS: - B - barycentric interpolant built with one of model building - subroutines. - T - interpolation point +NOTE: this function does not verify its arguments, it just copies all +fields of the structure. -OUTPUT PARAMETERS: - F - barycentric interpolant at T - DF - first derivative + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::sparsecopy( + sparsematrix s0, + sparsematrix& s1, + const xparams _params = alglib::xdefault); -NOTE +
    + +
    +
    /************************************************************************* +This function copies S0 to S1. +Memory already allocated in S1 is reused as much as possible. +NOTE: this function does not verify its arguments, it just copies all +fields of the structure. - -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::barycentricdiff1( - barycentricinterpolant b, - double t, - double& f, - double& df); +
    void alglib::sparsecopybuf( + sparsematrix s0, + sparsematrix s1, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Differentiation of barycentric interpolant: first/second derivatives. - -INPUT PARAMETERS: - B - barycentric interpolant built with one of model building - subroutines. - T - interpolation point - -OUTPUT PARAMETERS: - F - barycentric interpolant at T - DF - first derivative - D2F - second derivative +This function performs out-of-place conversion to desired sparse storage +format. S0 is copied to S1 and converted on-the-fly. Memory allocated in +S1 is reused to maximum extent possible. -NOTE: this algorithm may fail due to overflow/underflor if used on data -whose values are close to MaxRealNumber or MinRealNumber. Use more robust -BarycentricDiff1() subroutine in such cases. +INPUT PARAMETERS + S0 - sparse matrix in any format. + Fmt - desired storage format of the output, as returned by + SparseGetMatrixType() function: + * 0 for hash-based storage + * 1 for CRS + * 2 for SKS +OUTPUT PARAMETERS + S1 - sparse matrix in requested format. - -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 16.01.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::barycentricdiff2( - barycentricinterpolant b, - double t, - double& f, - double& df, - double& d2f); +
    void alglib::sparsecopytobuf( + sparsematrix s0, + ae_int_t fmt, + sparsematrix s1, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine performs linear transformation of the argument. +This function performs out-of-place conversion to CRS format. S0 is +copied to S1 and converted on-the-fly. -INPUT PARAMETERS: - B - rational interpolant in barycentric form - CA, CB - transformation coefficients: x = CA*t + CB +INPUT PARAMETERS + S0 - sparse matrix in any format. + +OUTPUT PARAMETERS + S1 - sparse matrix in CRS format. + +NOTE: if S0 is stored as CRS, it is just copied without conversion. -OUTPUT PARAMETERS: - B - transformed interpolant with X replaced by T +NOTE: this function de-allocates memory occupied by S1 before starting CRS + conversion. If you perform a lot of repeated CRS conversions, it may + lead to memory fragmentation. In this case we recommend you to use + SparseCopyToCRSBuf() function which re-uses memory in S1 as much as + possible. -- ALGLIB PROJECT -- - Copyright 19.08.2009 by Bochkanov Sergey + Copyright 20.07.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::barycentriclintransx( - barycentricinterpolant b, - double ca, - double cb); +
    void alglib::sparsecopytocrs( + sparsematrix s0, + sparsematrix& s1, + const xparams _params = alglib::xdefault);
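A sketch contrasting out-of-place conversion with the in-place variant: the hash-table original stays editable while the CRS copy is used for computations. It assumes the stdafx.h/linalg.h boilerplate; sparsecreate(), sparseset() and sparsegetmatrixtype() are assumed from the same sparse unit, and the format codes 0/1 follow the SparseGetMatrixType() convention listed earlier.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    sparsematrix h, c;
    sparsecreate(2, 2, h);                // editable hash-table matrix
    sparseset(h, 0, 0, 1.0);

    sparsecopytocrs(h, c);                // c is CRS, h keeps its hash format
    sparseset(h, 1, 1, 5.0);              // still legal: h was not converted

    printf("%d %d\n",
           int(sparsegetmatrixtype(h)),   // expected 0 (hash)
           int(sparsegetmatrixtype(c)));  // expected 1 (CRS)
    return 0;
}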
    - +
     
    /************************************************************************* -This subroutine performs linear transformation of the barycentric -interpolant. +This function performs out-of-place conversion to CRS format. S0 is +copied to S1 and converted on-the-fly. Memory allocated in S1 is reused to +maximum extent possible. -INPUT PARAMETERS: - B - rational interpolant in barycentric form - CA, CB - transformation coefficients: B2(x) = CA*B(x) + CB +INPUT PARAMETERS + S0 - sparse matrix in any format. + S1 - matrix which may contain some pre-allocated memory, or + can be just uninitialized structure. -OUTPUT PARAMETERS: - B - transformed interpolant +OUTPUT PARAMETERS + S1 - sparse matrix in CRS format. + +NOTE: if S0 is stored as CRS, it is just copied without conversion. -- ALGLIB PROJECT -- - Copyright 19.08.2009 by Bochkanov Sergey + Copyright 20.07.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::barycentriclintransy( - barycentricinterpolant b, - double ca, - double cb); +
    void alglib::sparsecopytocrsbuf( + sparsematrix s0, + sparsematrix s1, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Extracts X/Y/W arrays from rational interpolant - -INPUT PARAMETERS: - B - barycentric interpolant +This function performs out-of-place conversion to Hash table storage +format. S0 is copied to S1 and converted on-the-fly. -OUTPUT PARAMETERS: - N - nodes count, N>0 - X - interpolation nodes, array[0..N-1] - F - function values, array[0..N-1] - W - barycentric weights, array[0..N-1] +INPUT PARAMETERS + S0 - sparse matrix in any format. - -- ALGLIB -- - Copyright 17.08.2009 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::barycentricunpack( - barycentricinterpolant b, - ae_int_t& n, - real_1d_array& x, - real_1d_array& y, - real_1d_array& w); +OUTPUT PARAMETERS + S1 - sparse matrix in Hash table format. -
    - -
    - -rbfmodel
    -rbfreport
    - -rbfbuildmodel
    -rbfcalc
    -rbfcalc2
    -rbfcalc3
    -rbfcalcbuf
    -rbfcreate
    -rbfgridcalc2
    -rbfserialize
    -rbfsetalgomultilayer
    -rbfsetalgoqnn
    -rbfsetconstterm
    -rbfsetlinterm
    -rbfsetpoints
    -rbfsetzeroterm
    -rbfunpack
    -rbfunserialize
    - - - - - - - - -
    rbf_d_ml_ls Least squares problem solved with RBF-ML algorithm
    rbf_d_ml_simple Simple model built with RBF-ML algorithm
    rbf_d_polterm RBF models - working with polynomial term
    rbf_d_qnn Simple model built with RBF-QNN algorithm
    rbf_d_serialize Serialization/unserialization
    rbf_d_vector Working with vector functions
    - -
    -
    /************************************************************************* -RBF model. +NOTE: if S0 is stored as Hash-table, it is just copied without conversion. -Never try to directly work with fields of this object - always use ALGLIB -functions to use this object. -*************************************************************************/ -
    class rbfmodel -{ -}; +NOTE: this function de-allocates memory occupied by S1 before starting + conversion. If you perform a lot of repeated conversions, it may + lead to memory fragmentation. In this case we recommend you to use + SparseCopyToHashBuf() function which re-uses memory in S1 as much as + possible. -
    - -
    -
    /************************************************************************* -RBF solution report: -* TerminationType - termination type, positive values - success, - non-positive - failure. + -- ALGLIB PROJECT -- + Copyright 20.07.2012 by Bochkanov Sergey *************************************************************************/ -
    class rbfreport -{ - ae_int_t arows; - ae_int_t acols; - ae_int_t annz; - ae_int_t iterationscount; - ae_int_t nmv; - ae_int_t terminationtype; -}; +
    void alglib::sparsecopytohash( + sparsematrix s0, + sparsematrix& s1, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function builds RBF model and returns report (contains some -information which can be used for evaluation of the algorithm properties). - -Call to this function modifies RBF model by calculating its centers/radii/ -weights and saving them into RBFModel structure. Initially RBFModel -contain zero coefficients, but after call to this function we will have -coefficients which were calculated in order to fit our dataset. +This function performs out-of-place conversion to Hash table storage +format. S0 is copied to S1 and converted on-the-fly. Memory allocated in +S1 is reused to maximum extent possible. -After you called this function you can call RBFCalc(), RBFGridCalc() and -other model calculation functions. +INPUT PARAMETERS + S0 - sparse matrix in any format. -INPUT PARAMETERS: - S - RBF model, initialized by RBFCreate() call - Rep - report: - * Rep.TerminationType: - * -5 - non-distinct basis function centers were detected, - interpolation aborted - * -4 - nonconvergence of the internal SVD solver - * 1 - successful termination - Fields are used for debugging purposes: - * Rep.IterationsCount - iterations count of the LSQR solver - * Rep.NMV - number of matrix-vector products - * Rep.ARows - rows count for the system matrix - * Rep.ACols - columns count for the system matrix - * Rep.ANNZ - number of significantly non-zero elements - (elements above some algorithm-determined threshold) +OUTPUT PARAMETERS + S1 - sparse matrix in Hash table format. -NOTE: failure to build model will leave current state of the structure -unchanged. +NOTE: if S0 is stored as Hash-table, it is just copied without conversion. - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 20.07.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfbuildmodel(rbfmodel s, rbfreport& rep); +
    void alglib::sparsecopytohashbuf( + sparsematrix s0, + sparsematrix s1, + const xparams _params = alglib::xdefault);
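A minimal sketch of the two conversion routines documented above (illustrative only, not one of the manual's shipped examples; it assumes the sparse API is declared in linalg.h, as in stock ALGLIB C++ distributions, and uses sparsecreatecrs()/sparseset()/sparseget(), which are documented further below):

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    // small 2x2 CRS matrix with one non-zero per row
    integer_1d_array ner = "[1,1]";
    sparsematrix a;
    sparsecreatecrs(2, 2, ner, a);
    sparseset(a, 0, 0, 1.0);
    sparseset(a, 1, 1, 2.0);

    // out-of-place conversion to Hash-table storage (fresh allocation)
    sparsematrix h;
    sparsecopytohash(a, h);

    // buffered variant: reuses memory already held by h
    sparsecopytohashbuf(a, h);

    // Hash-table matrices accept further element-wise updates
    sparseset(h, 0, 1, 3.0);
    printf("%.1f\n", sparseget(h, 0, 1));   // EXPECTED: 3.0
    return 0;
}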
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +
     
    /************************************************************************* -This function calculates values of the RBF model at the given point. +This function performs out-of-place conversion to SKS storage format. +S0 is copied to S1 and converted on-the-fly. -This is general function which can be used for arbitrary NX (dimension of -the space of arguments) and NY (dimension of the function itself). However -when you have NY=1 you may find more convenient to use RBFCalc2() or -RBFCalc3(). +INPUT PARAMETERS + S0 - sparse matrix in any format. -This function returns 0.0 when model is not initialized. +OUTPUT PARAMETERS + S1 - sparse matrix in SKS format. -INPUT PARAMETERS: - S - RBF model - X - coordinates, array[NX]. - X may have more than NX elements, in this case only - leading NX will be used. +NOTE: if S0 is stored as SKS, it is just copied without conversion. -OUTPUT PARAMETERS: - Y - function value, array[NY]. Y is out-parameter and - reallocated after call to this function. In case you want - to reuse previously allocated Y, you may use RBFCalcBuf(), - which reallocates Y only when it is too small. +NOTE: this function de-allocates memory occupied by S1 before starting + conversion. If you perform a lot of repeated conversions, it may + lead to memory fragmentation. In this case we recommend you to use + SparseCopyToSKSBuf() function which re-uses memory in S1 as much as + possible. - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 20.07.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfcalc(rbfmodel s, real_1d_array x, real_1d_array& y); +
    void alglib::sparsecopytosks( + sparsematrix s0, + sparsematrix& s1, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function calculates values of the RBF model in the given point. - -This function should be used when we have NY=1 (scalar function) and NX=2 -(2-dimensional space). If you have 3-dimensional space, use RBFCalc3(). If -you have general situation (NX-dimensional space, NY-dimensional function) -you should use general, less efficient implementation RBFCalc(). - -If you want to calculate function values many times, consider using -RBFGridCalc2(), which is far more efficient than many subsequent calls to -RBFCalc2(). +This function performs out-of-place conversion to SKS format. S0 is +copied to S1 and converted on-the-fly. Memory allocated in S1 is reused +to maximum extent possible. -This function returns 0.0 when: -* model is not initialized -* NX<>2 - *NY<>1 +INPUT PARAMETERS + S0 - sparse matrix in any format. -INPUT PARAMETERS: - S - RBF model - X0 - first coordinate, finite number - X1 - second coordinate, finite number +OUTPUT PARAMETERS + S1 - sparse matrix in SKS format. -RESULT: - value of the model or 0.0 (as defined above) +NOTE: if S0 is stored as SKS, it is just copied without conversion. - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 20.07.2012 by Bochkanov Sergey *************************************************************************/ -
    double alglib::rbfcalc2(rbfmodel s, double x0, double x1); +
    void alglib::sparsecopytosksbuf( + sparsematrix s0, + sparsematrix s1, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  [3]  [4]  

    - +
     
    /************************************************************************* -This function calculates values of the RBF model in the given point. - -This function should be used when we have NY=1 (scalar function) and NX=3 -(3-dimensional space). If you have 2-dimensional space, use RBFCalc2(). If -you have general situation (NX-dimensional space, NY-dimensional function) -you should use general, less efficient implementation RBFCalc(). - -This function returns 0.0 when: -* model is not initialized -* NX<>3 - *NY<>1 +This function performs copying with transposition of CRS matrix. -INPUT PARAMETERS: - S - RBF model - X0 - first coordinate, finite number - X1 - second coordinate, finite number - X2 - third coordinate, finite number +INPUT PARAMETERS + S0 - sparse matrix in CRS format. -RESULT: - value of the model or 0.0 (as defined above) +OUTPUT PARAMETERS + S1 - sparse matrix, transposed - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 23.07.2018 by Bochkanov Sergey *************************************************************************/ -
    double alglib::rbfcalc3(rbfmodel s, double x0, double x1, double x2); +
    void alglib::sparsecopytransposecrs( + sparsematrix s0, + sparsematrix& s1, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function calculates values of the RBF model at the given point. - -Same as RBFCalc(), but does not reallocate Y when in is large enough to -store function values. +This function performs copying with transposition of CRS matrix (buffered +version which reuses memory already allocated by the target as much as +possible). -INPUT PARAMETERS: - S - RBF model - X - coordinates, array[NX]. - X may have more than NX elements, in this case only - leading NX will be used. - Y - possibly preallocated array +INPUT PARAMETERS + S0 - sparse matrix in CRS format. -OUTPUT PARAMETERS: - Y - function value, array[NY]. Y is not reallocated when it - is larger than NY. +OUTPUT PARAMETERS + S1 - sparse matrix, transposed; previously allocated memory is + reused if possible. - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 23.07.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfcalcbuf(rbfmodel s, real_1d_array x, real_1d_array& y); +
    void alglib::sparsecopytransposecrsbuf( + sparsematrix s0, + sparsematrix s1, + const xparams _params = alglib::xdefault);
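A short sketch of CRS transposition with the routines above (illustrative; the linalg.h header and the helper calls sparsecreatecrs()/sparseset()/sparseget() are assumptions based on the rest of this manual):

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    // 2x3 CRS matrix: row 0 = [1,0,2], row 1 = [0,3,0]
    integer_1d_array ner = "[2,1]";
    sparsematrix a;
    sparsecreatecrs(2, 3, ner, a);
    sparseset(a, 0, 0, 1.0);
    sparseset(a, 0, 2, 2.0);
    sparseset(a, 1, 1, 3.0);

    // at := a^T, stored as a 3x2 CRS matrix
    sparsematrix at;
    sparsecopytransposecrs(a, at);
    printf("%.1f\n", sparseget(at, 2, 0));   // EXPECTED: 2.0 (= a[0,2])

    // buffered variant reuses memory already allocated in at
    sparsecopytransposecrsbuf(a, at);
    return 0;
}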
    - +
     
    /************************************************************************* -This function creates RBF model for a scalar (NY=1) or vector (NY>1) -function in a NX-dimensional space (NX=2 or NX=3). +This function creates sparse matrix in a Hash-Table format. -Newly created model is empty. It can be used for interpolation right after -creation, but it just returns zeros. You have to add points to the model, -tune interpolation settings, and then call model construction function -RBFBuildModel() which will update model according to your specification. +This function creates Hast-Table matrix, which can be converted to CRS +format after its initialization is over. Typical usage scenario for a +sparse matrix is: +1. creation in a Hash-Table format +2. insertion of the matrix elements +3. conversion to the CRS representation +4. matrix is passed to some linear algebra algorithm -USAGE: -1. User creates model with RBFCreate() -2. User adds dataset with RBFSetPoints() (points do NOT have to be on a - regular grid) -3. (OPTIONAL) User chooses polynomial term by calling: - * RBFLinTerm() to set linear term - * RBFConstTerm() to set constant term - * RBFZeroTerm() to set zero term - By default, linear term is used. -4. User chooses specific RBF algorithm to use: either QNN (RBFSetAlgoQNN) - or ML (RBFSetAlgoMultiLayer). -5. User calls RBFBuildModel() function which rebuilds model according to - the specification -6. User may call RBFCalc() to calculate model value at the specified point, - RBFGridCalc() to calculate model values at the points of the regular - grid. User may extract model coefficients with RBFUnpack() call. +Some information about different matrix formats can be found below, in +the "NOTES" section. -INPUT PARAMETERS: - NX - dimension of the space, NX=2 or NX=3 - NY - function dimension, NY>=1 +INPUT PARAMETERS + M - number of rows in a matrix, M>=1 + N - number of columns in a matrix, N>=1 + K - K>=0, expected number of non-zero elements in a matrix. + K can be inexact approximation, can be less than actual + number of elements (table will grow when needed) or + even zero). + It is important to understand that although hash-table + may grow automatically, it is better to provide good + estimate of data size. -OUTPUT PARAMETERS: - S - RBF model (initially equals to zero) +OUTPUT PARAMETERS + S - sparse M*N matrix in Hash-Table representation. + All elements of the matrix are zero. -NOTE 1: memory requirements. RBF models require amount of memory which is - proportional to the number of data points. Memory is allocated - during model construction, but most of this memory is freed after - model coefficients are calculated. - - Some approximate estimates for N centers with default settings are - given below: - * about 250*N*(sizeof(double)+2*sizeof(int)) bytes of memory is - needed during model construction stage. - * about 15*N*sizeof(double) bytes is needed after model is built. - For example, for N=100000 we may need 0.6 GB of memory to build - model, but just about 0.012 GB to store it. +NOTE 1 - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::rbfcreate(ae_int_t nx, ae_int_t ny, rbfmodel& s); +Hash-tables use memory inefficiently, and they have to keep some amount +of the "spare memory" in order to have good performance. Hash table for +matrix with K non-zero elements will need C*K*(8+2*sizeof(int)) bytes, +where C is a small constant, about 1.5-2 in magnitude. -
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  [6]  

    - -
    -
    /************************************************************************* -This function calculates values of the RBF model at the regular grid. +CRS storage, from the other side, is more memory-efficient, and needs +just K*(8+sizeof(int))+M*sizeof(int) bytes, where M is a number of rows +in a matrix. -Grid have N0*N1 points, with Point[I,J] = (X0[I], X1[J]) +When you convert from the Hash-Table to CRS representation, all unneeded +memory will be freed. -This function returns 0.0 when: -* model is not initialized -* NX<>2 - *NY<>1 +NOTE 2 -INPUT PARAMETERS: - S - RBF model - X0 - array of grid nodes, first coordinates, array[N0] - N0 - grid size (number of nodes) in the first dimension - X1 - array of grid nodes, second coordinates, array[N1] - N1 - grid size (number of nodes) in the second dimension +Comments of SparseMatrix structure outline information about different +sparse storage formats. We recommend you to read them before starting to +use ALGLIB sparse matrices. -OUTPUT PARAMETERS: - Y - function values, array[N0,N1]. Y is out-variable and - is reallocated by this function. +NOTE 3 - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey +This function completely overwrites S with new sparse matrix. Previously +allocated storage is NOT reused. If you want to reuse already allocated +memory, call SparseCreateBuf function. + + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfgridcalc2( - rbfmodel s, - real_1d_array x0, - ae_int_t n0, - real_1d_array x1, - ae_int_t n1, - real_2d_array& y); +
    void alglib::sparsecreate( + ae_int_t m, + ae_int_t n, + sparsematrix& s, + const xparams _params = alglib::xdefault); +void alglib::sparsecreate( + ae_int_t m, + ae_int_t n, + ae_int_t k, + sparsematrix& s, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  
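The four-step scenario above, written out as a compact sketch (not one of the manual's own examples; it assumes the linalg.h header and sparseset()/sparseconverttocrs()/sparseiscrs(), which are documented elsewhere in this manual):

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    // 1. create a 3x3 matrix in Hash-Table format (K=4 is only a size hint)
    sparsematrix s;
    sparsecreate(3, 3, 4, s);

    // 2. insert elements in arbitrary order
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 1, 1, 3.0);
    sparseset(s, 2, 0, 1.0);

    // 3. convert to CRS before handing the matrix to linear algebra code
    sparseconverttocrs(s);

    // 4. the CRS matrix is ready for use
    printf("%d\n", int(sparseiscrs(s)));    // EXPECTED: 1
    printf("%.1f\n", sparseget(s, 2, 0));   // EXPECTED: 1.0
    return 0;
}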

    +
     
    /************************************************************************* -This function serializes data structure to string. +This version of SparseCreate function creates sparse matrix in Hash-Table +format, reusing previously allocated storage as much as possible. Read +comments for SparseCreate() for more information. -Important properties of s_out: -* it contains alphanumeric characters, dots, underscores, minus signs -* these symbols are grouped into words, which are separated by spaces - and Windows-style (CR+LF) newlines -* although serializer uses spaces and CR+LF as separators, you can - replace any separator character by arbitrary combination of spaces, - tabs, Windows or Unix newlines. It allows flexible reformatting of - the string in case you want to include it into text or XML file. - But you should not insert separators into the middle of the "words" - nor you should change case of letters. -* s_out can be freely moved between 32-bit and 64-bit systems, little - and big endian machines, and so on. You can serialize structure on - 32-bit machine and unserialize it on 64-bit one (or vice versa), or - serialize it on SPARC and unserialize on x86. You can also - serialize it in C++ version of ALGLIB and unserialize in C# one, - and vice versa. +INPUT PARAMETERS + M - number of rows in a matrix, M>=1 + N - number of columns in a matrix, N>=1 + K - K>=0, expected number of non-zero elements in a matrix. + K can be inexact approximation, can be less than actual + number of elements (table will grow when needed) or + even zero). + It is important to understand that although hash-table + may grow automatically, it is better to provide good + estimate of data size. + S - SparseMatrix structure which MAY contain some already + allocated storage. + +OUTPUT PARAMETERS + S - sparse M*N matrix in Hash-Table representation. + All elements of the matrix are zero. + Previously allocated storage is reused, if its size + is compatible with expected number of non-zeros K. + + -- ALGLIB PROJECT -- + Copyright 14.01.2014 by Bochkanov Sergey *************************************************************************/ -
    void rbfserialize(rbfmodel &obj, std::string &s_out); +
    void alglib::sparsecreatebuf( + ae_int_t m, + ae_int_t n, + sparsematrix s, + const xparams _params = alglib::xdefault); +void alglib::sparsecreatebuf( + ae_int_t m, + ae_int_t n, + ae_int_t k, + sparsematrix s, + const xparams _params = alglib::xdefault); +
    - +
     
    /************************************************************************* -This function sets RBF interpolation algorithm. ALGLIB supports several -RBF algorithms with different properties. - -This algorithm is called RBF-ML. It builds multilayer RBF model, i.e. -model with subsequently decreasing radii, which allows us to combine -smoothness (due to large radii of the first layers) with exactness (due -to small radii of the last layers) and fast convergence. - -Internally RBF-ML uses many different means of acceleration, from sparse -matrices to KD-trees, which results in algorithm whose working time is -roughly proportional to N*log(N)*Density*RBase^2*NLayers, where N is a -number of points, Density is an average density if points per unit of the -interpolation space, RBase is an initial radius, NLayers is a number of -layers. - -RBF-ML is good for following kinds of interpolation problems: -1. "exact" problems (perfect fit) with well separated points -2. least squares problems with arbitrary distribution of points (algorithm - gives perfect fit where it is possible, and resorts to least squares - fit in the hard areas). -3. noisy problems where we want to apply some controlled amount of - smoothing. +This function creates sparse matrix in a CRS format (expert function for +situations when you are running out of memory). -INPUT PARAMETERS: - S - RBF model, initialized by RBFCreate() call - RBase - RBase parameter, RBase>0 - NLayers - NLayers parameter, NLayers>0, recommended value to start - with - about 5. - LambdaV - regularization value, can be useful when solving problem - in the least squares sense. Optimal lambda is problem- - dependent and require trial and error. In our experience, - good lambda can be as large as 0.1, and you can use 0.001 - as initial guess. - Default value - 0.01, which is used when LambdaV is not - given. You can specify zero value, but it is not - recommended to do so. +This function creates CRS matrix. Typical usage scenario for a CRS matrix +is: +1. creation (you have to tell number of non-zero elements at each row at + this moment) +2. insertion of the matrix elements (row by row, from left to right) +3. matrix is passed to some linear algebra algorithm -TUNING ALGORITHM +This function is a memory-efficient alternative to SparseCreate(), but it +is more complex because it requires you to know in advance how large your +matrix is. Some information about different matrix formats can be found +in comments on SparseMatrix structure. We recommend you to read them +before starting to use ALGLIB sparse matrices.. -In order to use this algorithm you have to choose three parameters: -* initial radius RBase -* number of layers in the model NLayers -* regularization coefficient LambdaV +INPUT PARAMETERS + M - number of rows in a matrix, M>=1 + N - number of columns in a matrix, N>=1 + NER - number of elements at each row, array[M], NER[I]>=0 -Initial radius is easy to choose - you can pick any number several times -larger than the average distance between points. Algorithm won't break -down if you choose radius which is too large (model construction time will -increase, but model will be built correctly). +OUTPUT PARAMETERS + S - sparse M*N matrix in CRS representation. + You have to fill ALL non-zero elements by calling + SparseSet() BEFORE you try to use this matrix. -Choose such number of layers that RLast=RBase/2^(NLayers-1) (radius used -by the last layer) will be smaller than the typical distance between -points. 
In case model error is too large, you can increase number of -layers. Having more layers will make model construction and evaluation -proportionally slower, but it will allow you to have model which precisely -fits your data. From the other side, if you want to suppress noise, you -can DECREASE number of layers to make your model less flexible. +NOTE: this function completely overwrites S with new sparse matrix. + Previously allocated storage is NOT reused. If you want to reuse + already allocated memory, call SparseCreateCRSBuf function. -Regularization coefficient LambdaV controls smoothness of the individual -models built for each layer. We recommend you to use default value in case -you don't want to tune this parameter, because having non-zero LambdaV -accelerates and stabilizes internal iterative algorithm. In case you want -to suppress noise you can use LambdaV as additional parameter (larger -value = more smoothness) to tune. + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::sparsecreatecrs( + ae_int_t m, + ae_int_t n, + integer_1d_array ner, + sparsematrix& s, + const xparams _params = alglib::xdefault); -TYPICAL ERRORS +
    +

    Examples:   [1]  
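A sketch of the CRS creation path described above (illustrative, assuming linalg.h); note that with sparsecreatecrs() every declared non-zero must be written row by row, from left to right:

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    // row 0 holds 2 non-zeros, row 1 holds 1, row 2 holds 2
    integer_1d_array ner = "[2,1,2]";
    sparsematrix s;
    sparsecreatecrs(3, 3, ner, s);

    // fill ALL declared non-zeros, row by row, from left to right
    sparseset(s, 0, 0, 4.0);
    sparseset(s, 0, 2, 1.0);
    sparseset(s, 1, 1, 3.0);
    sparseset(s, 2, 0, 1.0);
    sparseset(s, 2, 2, 5.0);

    printf("%.1f\n", sparseget(s, 0, 2));   // EXPECTED: 1.0
    return 0;
}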

    + +
    +
    /************************************************************************* +This function creates sparse matrix in a CRS format (expert function for +situations when you are running out of memory). This version of CRS +matrix creation function may reuse memory already allocated in S. -1. Using initial radius which is too large. Memory requirements of the - RBF-ML are roughly proportional to N*Density*RBase^2 (where Density is - an average density of points per unit of the interpolation space). In - the extreme case of the very large RBase we will need O(N^2) units of - memory - and many layers in order to decrease radius to some reasonably - small value. +This function creates CRS matrix. Typical usage scenario for a CRS matrix +is: +1. creation (you have to tell number of non-zero elements at each row at + this moment) +2. insertion of the matrix elements (row by row, from left to right) +3. matrix is passed to some linear algebra algorithm -2. Using too small number of layers - RBF models with large radius are not - flexible enough to reproduce small variations in the target function. - You need many layers with different radii, from large to small, in - order to have good model. +This function is a memory-efficient alternative to SparseCreate(), but it +is more complex because it requires you to know in advance how large your +matrix is. Some information about different matrix formats can be found +in comments on SparseMatrix structure. We recommend you to read them +before starting to use ALGLIB sparse matrices.. -3. Using initial radius which is too small. You will get model with - "holes" in the areas which are too far away from interpolation centers. - However, algorithm will work correctly (and quickly) in this case. +INPUT PARAMETERS + M - number of rows in a matrix, M>=1 + N - number of columns in a matrix, N>=1 + NER - number of elements at each row, array[M], NER[I]>=0 + S - sparse matrix structure with possibly preallocated + memory. -4. Using too many layers - you will get too large and too slow model. This - model will perfectly reproduce your function, but maybe you will be - able to achieve similar results with less layers (and less memory). +OUTPUT PARAMETERS + S - sparse M*N matrix in CRS representation. + You have to fill ALL non-zero elements by calling + SparseSet() BEFORE you try to use this matrix. - -- ALGLIB -- - Copyright 02.03.2012 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfsetalgomultilayer( - rbfmodel s, - double rbase, - ae_int_t nlayers); -void alglib::rbfsetalgomultilayer( - rbfmodel s, - double rbase, - ae_int_t nlayers, - double lambdav); +
    void alglib::sparsecreatecrsbuf( + ae_int_t m, + ae_int_t n, + integer_1d_array ner, + sparsematrix s, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - +
     
    /************************************************************************* -This function sets RBF interpolation algorithm. ALGLIB supports several -RBF algorithms with different properties. +This function creates sparse matrix in a SKS format (skyline storage +format). In most cases you do not need this function - CRS format better +suits most use cases. -This algorithm is called RBF-QNN and it is good for point sets with -following properties: -a) all points are distinct -b) all points are well separated. -c) points distribution is approximately uniform. There is no "contour - lines", clusters of points, or other small-scale structures. +INPUT PARAMETERS + M, N - number of rows(M) and columns (N) in a matrix: + * M=N (as for now, ALGLIB supports only square SKS) + * N>=1 + * M>=1 + D - "bottom" bandwidths, array[M], D[I]>=0. + I-th element stores number of non-zeros at I-th row, + below the diagonal (diagonal itself is not included) + U - "top" bandwidths, array[N], U[I]>=0. + I-th element stores number of non-zeros at I-th row, + above the diagonal (diagonal itself is not included) -Algorithm description: -1) interpolation centers are allocated to data points -2) interpolation radii are calculated as distances to the nearest centers - times Q coefficient (where Q is a value from [0.75,1.50]). -3) after performing (2) radii are transformed in order to avoid situation - when single outlier has very large radius and influences many points - across all dataset. Transformation has following form: - new_r[i] = min(r[i],Z*median(r[])) - where r[i] is I-th radius, median() is a median radius across entire - dataset, Z is user-specified value which controls amount of deviation - from median radius. +OUTPUT PARAMETERS + S - sparse M*N matrix in SKS representation. + All elements are filled by zeros. + You may use sparseset() to change their values. -When (a) is violated, we will be unable to build RBF model. When (b) or -(c) are violated, model will be built, but interpolation quality will be -low. See http://www.alglib.net/interpolation/ for more information on this -subject. +NOTE: this function completely overwrites S with new sparse matrix. + Previously allocated storage is NOT reused. If you want to reuse + already allocated memory, call SparseCreateSKSBuf function. -This algorithm is used by default. + -- ALGLIB PROJECT -- + Copyright 13.01.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::sparsecreatesks( + ae_int_t m, + ae_int_t n, + integer_1d_array d, + integer_1d_array u, + sparsematrix& s, + const xparams _params = alglib::xdefault); -Additional Q parameter controls smoothness properties of the RBF basis: -* Q<0.75 will give perfectly conditioned basis, but terrible smoothness - properties (RBF interpolant will have sharp peaks around function values) -* Q around 1.0 gives good balance between smoothness and condition number -* Q>1.5 will lead to badly conditioned systems and slow convergence of the - underlying linear solver (although smoothness will be very good) -* Q>2.0 will effectively make optimizer useless because it won't converge - within reasonable amount of iterations. It is possible to set such large - Q, but it is advised not to do so. +
    + +
    +
    /************************************************************************* +This function creates sparse matrix in a SKS format (skyline storage +format). Unlike more general sparsecreatesks(), this function creates +sparse matrix with constant bandwidth. -INPUT PARAMETERS: - S - RBF model, initialized by RBFCreate() call - Q - Q parameter, Q>0, recommended value - 1.0 - Z - Z parameter, Z>0, recommended value - 5.0 +You may want to use this function instead of sparsecreatesks() when your +matrix has constant or nearly-constant bandwidth, and you want to +simplify source code. -NOTE: this function has some serialization-related subtleties. We - recommend you to study serialization examples from ALGLIB Reference - Manual if you want to perform serialization of your models. +INPUT PARAMETERS + M, N - number of rows(M) and columns (N) in a matrix: + * M=N (as for now, ALGLIB supports only square SKS) + * N>=1 + * M>=1 + BW - matrix bandwidth, BW>=0 +OUTPUT PARAMETERS + S - sparse M*N matrix in SKS representation. + All elements are filled by zeros. + You may use sparseset() to change their values. - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey +NOTE: this function completely overwrites S with new sparse matrix. + Previously allocated storage is NOT reused. If you want to reuse + already allocated memory, call sparsecreatesksbandbuf function. + + -- ALGLIB PROJECT -- + Copyright 25.12.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfsetalgoqnn(rbfmodel s); -void alglib::rbfsetalgoqnn(rbfmodel s, double q, double z); +
    void alglib::sparsecreatesksband( + ae_int_t m, + ae_int_t n, + ae_int_t bw, + sparsematrix& s, + const xparams _params = alglib::xdefault);
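A sketch of a constant-bandwidth SKS matrix built with the function above (illustrative; linalg.h assumed). A tridiagonal matrix corresponds to bandwidth BW=1:

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    // 4x4 tridiagonal matrix stored in SKS format with bandwidth 1
    sparsematrix s;
    sparsecreatesksband(4, 4, 1, s);
    for(int i=0; i<4; i++)
    {
        sparseset(s, i, i, 2.0);            // main diagonal
        if( i>0 )
            sparseset(s, i, i-1, -1.0);     // sub-diagonal
        if( i<3 )
            sparseset(s, i, i+1, -1.0);     // super-diagonal
    }
    printf("%.1f\n", sparseget(s, 2, 1));   // EXPECTED: -1.0
    return 0;
}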
    -

    Examples:   [1]  [2]  [3]  

    - +
     
    /************************************************************************* -This function sets constant term (model is a sum of radial basis functions -plus constant). This function won't have effect until next call to -RBFBuildModel(). +This is "buffered" version of sparsecreatesksband() which reuses memory +previously allocated in S (of course, memory is reallocated if needed). -INPUT PARAMETERS: - S - RBF model, initialized by RBFCreate() call +You may want to use this function instead of sparsecreatesksbuf() when +your matrix has constant or nearly-constant bandwidth, and you want to +simplify source code. -NOTE: this function has some serialization-related subtleties. We - recommend you to study serialization examples from ALGLIB Reference - Manual if you want to perform serialization of your models. +INPUT PARAMETERS + M, N - number of rows(M) and columns (N) in a matrix: + * M=N (as for now, ALGLIB supports only square SKS) + * N>=1 + * M>=1 + BW - bandwidth, BW>=0 - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey +OUTPUT PARAMETERS + S - sparse M*N matrix in SKS representation. + All elements are filled by zeros. + You may use sparseset() to change their values. + + -- ALGLIB PROJECT -- + Copyright 13.01.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfsetconstterm(rbfmodel s); +
    void alglib::sparsecreatesksbandbuf( + ae_int_t m, + ae_int_t n, + ae_int_t bw, + sparsematrix s, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function sets linear term (model is a sum of radial basis functions -plus linear polynomial). This function won't have effect until next call -to RBFBuildModel(). +This is "buffered" version of SparseCreateSKS() which reuses memory +previously allocated in S (of course, memory is reallocated if needed). -INPUT PARAMETERS: - S - RBF model, initialized by RBFCreate() call +This function creates sparse matrix in a SKS format (skyline storage +format). In most cases you do not need this function - CRS format better +suits most use cases. -NOTE: this function has some serialization-related subtleties. We - recommend you to study serialization examples from ALGLIB Reference - Manual if you want to perform serialization of your models. +INPUT PARAMETERS + M, N - number of rows(M) and columns (N) in a matrix: + * M=N (as for now, ALGLIB supports only square SKS) + * N>=1 + * M>=1 + D - "bottom" bandwidths, array[M], 0<=D[I]<=I. + I-th element stores number of non-zeros at I-th row, + below the diagonal (diagonal itself is not included) + U - "top" bandwidths, array[N], 0<=U[I]<=I. + I-th element stores number of non-zeros at I-th row, + above the diagonal (diagonal itself is not included) - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey +OUTPUT PARAMETERS + S - sparse M*N matrix in SKS representation. + All elements are filled by zeros. + You may use sparseset() to change their values. + + -- ALGLIB PROJECT -- + Copyright 13.01.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfsetlinterm(rbfmodel s); +
    void alglib::sparsecreatesksbuf( + ae_int_t m, + ae_int_t n, + integer_1d_array d, + integer_1d_array u, + sparsematrix s, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function adds dataset. +This function is used to enumerate all elements of the sparse matrix. +Before first call user initializes T0 and T1 counters by zero. These +counters are used to remember current position in a matrix; after each +call they are updated by the function. -This function overrides results of the previous calls, i.e. multiple calls -of this function will result in only the last set being added. +Subsequent calls to this function return non-zero elements of the sparse +matrix, one by one. If you enumerate CRS matrix, matrix is traversed from +left to right, from top to bottom. In case you enumerate matrix stored as +Hash table, elements are returned in random order. -INPUT PARAMETERS: - S - RBF model, initialized by RBFCreate() call. - XY - points, array[N,NX+NY]. One row corresponds to one point - in the dataset. First NX elements are coordinates, next - NY elements are function values. Array may be larger than - specific, in this case only leading [N,NX+NY] elements - will be used. - N - number of points in the dataset +EXAMPLE + > T0=0 + > T1=0 + > while SparseEnumerate(S,T0,T1,I,J,V) do + > ....do something with I,J,V -After you've added dataset and (optionally) tuned algorithm settings you -should call RBFBuildModel() in order to build a model for you. +INPUT PARAMETERS + S - sparse M*N matrix in Hash-Table or CRS representation. + T0 - internal counter + T1 - internal counter -NOTE: this function has some serialization-related subtleties. We - recommend you to study serialization examples from ALGLIB Reference - Manual if you want to perform serialization of your models. +OUTPUT PARAMETERS + T0 - new value of the internal counter + T1 - new value of the internal counter + I - row index of non-zero element, 0<=I<M. + J - column index of non-zero element, 0<=J<N + V - value of the T-th element +RESULT + True in case of success (next non-zero element was retrieved) + False in case all non-zero elements were enumerated - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey +NOTE: you may call SparseRewriteExisting() during enumeration, but it is + THE ONLY matrix modification function you can call!!! Other + matrix modification functions should not be called during enumeration! + + -- ALGLIB PROJECT -- + Copyright 14.03.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfsetpoints(rbfmodel s, real_2d_array xy); -void alglib::rbfsetpoints(rbfmodel s, real_2d_array xy, ae_int_t n); +
    bool alglib::sparseenumerate( + sparsematrix s, + ae_int_t& t0, + ae_int_t& t1, + ae_int_t& i, + ae_int_t& j, + double& v, + const xparams _params = alglib::xdefault);
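The pseudocode in the EXAMPLE section above translates to C++ roughly as follows (a sketch, assuming linalg.h and sparsecreate()/sparseset() from this manual):

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    sparsematrix s;
    sparsecreate(2, 2, s);
    sparseset(s, 0, 0, 1.0);
    sparseset(s, 1, 1, 2.0);

    // T0/T1 must be zero-initialized before the first call
    ae_int_t t0 = 0, t1 = 0, i, j;
    double v;
    while( sparseenumerate(s, t0, t1, i, j, v) )
        printf("S[%d,%d]=%.1f\n", int(i), int(j), v);   // order is arbitrary for Hash-table storage
    return 0;
}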
    -

    Examples:   [1]  [2]  [3]  [4]  [5]  

    - +
     
    /************************************************************************* -This function sets zero term (model is a sum of radial basis functions -without polynomial term). This function won't have effect until next call -to RBFBuildModel(). +The function frees all memory occupied by sparse matrix. Sparse matrix +structure becomes unusable after this call. -INPUT PARAMETERS: - S - RBF model, initialized by RBFCreate() call +OUTPUT PARAMETERS + S - sparse matrix to delete -NOTE: this function has some serialization-related subtleties. We - recommend you to study serialization examples from ALGLIB Reference - Manual if you want to perform serialization of your models. + -- ALGLIB PROJECT -- + Copyright 24.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::sparsefree( + sparsematrix& s, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This function calculates generalized sparse matrix-vector product + + y := alpha*op(S)*x + beta*y + +Matrix S must be stored in CRS or SKS format (exception will be thrown +otherwise). op(S) can be either S or S^T. + +NOTE: this function expects Y to be large enough to store result. No + automatic preallocation happens for smaller arrays. + +INPUT PARAMETERS + S - sparse matrix in CRS or SKS format. + Alpha - source coefficient + OpS - operation type: + * OpS=0 => op(S) = S + * OpS=1 => op(S) = S^T + X - input vector, must have at least Cols(op(S))+IX elements + IX - subvector offset + Beta - destination coefficient + Y - preallocated output array, must have at least Rows(op(S))+IY elements + IY - subvector offset + +OUTPUT PARAMETERS + Y - elements [IY...IY+Rows(op(S))-1] are replaced by result, + other elements are not modified + +HANDLING OF SPECIAL CASES: +* below M=Rows(op(S)) and N=Cols(op(S)). Although current ALGLIB version + does not allow you to create zero-sized sparse matrices, internally + ALGLIB can deal with such matrices. So, comments for M or N equal to + zero are for internal use only. +* if M=0, then subroutine does nothing. It does not even touch arrays. +* if N=0 or Alpha=0.0, then: + * if Beta=0, then Y is filled by zeros. S and X are not referenced at + all. Initial values of Y are ignored (we do not multiply Y by zero, + we just rewrite it by zeros) + * if Beta<>0, then Y is replaced by Beta*Y +* if M>0, N>0, Alpha<>0, but Beta=0, then Y is replaced by alpha*op(S)*x + initial state of Y is ignored (rewritten without initial multiplication + by zeros). - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. + + -- ALGLIB PROJECT -- + Copyright 10.12.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfsetzeroterm(rbfmodel s); +
    void alglib::sparsegemv( + sparsematrix s, + double alpha, + ae_int_t ops, + real_1d_array x, + ae_int_t ix, + double beta, + real_1d_array& y, + ae_int_t iy, + const xparams _params = alglib::xdefault);
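A sketch of the generalized matrix-vector product above (illustrative; linalg.h assumed). Note that Y must be preallocated by the caller, as stated in the comments:

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    // 2x2 CRS matrix [[2,0],[1,3]]
    integer_1d_array ner = "[1,2]";
    sparsematrix s;
    sparsecreatecrs(2, 2, ner, s);
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 1, 0, 1.0);
    sparseset(s, 1, 1, 3.0);

    real_1d_array x = "[1,1]";
    real_1d_array y = "[0,0]";              // preallocated output, length >= Rows(op(S))+IY

    // y := 1.0*S*x + 0.0*y  (OpS=0, IX=0, IY=0)
    sparsegemv(s, 1.0, 0, x, 0, 0.0, y, 0);
    printf("%.1f %.1f\n", y[0], y[1]);      // EXPECTED: 2.0 4.0
    return 0;
}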
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function "unpacks" RBF model by extracting its coefficients. +This function returns S[i,j] - element of the sparse matrix. Matrix can +be in any mode (Hash-Table, CRS, SKS), but this function is less efficient +for CRS matrices. Hash-Table and SKS matrices can find element in O(1) +time, while CRS matrices need O(log(RS)) time, where RS is an number of +non-zero elements in a row. -INPUT PARAMETERS: - S - RBF model +INPUT PARAMETERS + S - sparse M*N matrix in Hash-Table representation. + Exception will be thrown for CRS matrix. + I - row index of the element to modify, 0<=I<M + J - column index of the element to modify, 0<=J<N -OUTPUT PARAMETERS: - NX - dimensionality of argument - NY - dimensionality of the target function - XWR - model information, array[NC,NX+NY+1]. - One row of the array corresponds to one basis function: - * first NX columns - coordinates of the center - * next NY columns - weights, one per dimension of the - function being modelled - * last column - radius, same for all dimensions of - the function being modelled - NC - number of the centers - V - polynomial term , array[NY,NX+1]. One row per one - dimension of the function being modelled. First NX - elements are linear coefficients, V[NX] is equal to the - constant part. +RESULT + value of S[I,J] or zero (in case no element with such index is found) - -- ALGLIB -- - Copyright 13.12.2011 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    void alglib::rbfunpack( - rbfmodel s, - ae_int_t& nx, - ae_int_t& ny, - real_2d_array& xwr, - ae_int_t& nc, - real_2d_array& v); +
    double alglib::sparseget( + sparsematrix s, + ae_int_t i, + ae_int_t j, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  [2]  
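A tiny sketch of the lookup semantics described above: elements that were never set simply read back as zero (illustrative; linalg.h assumed):

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    sparsematrix s;
    sparsecreate(2, 2, s);
    sparseset(s, 0, 1, 7.0);

    printf("%.1f\n", sparseget(s, 0, 1));   // stored element, EXPECTED: 7.0
    printf("%.1f\n", sparseget(s, 1, 0));   // absent element, EXPECTED: 0.0
    return 0;
}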

    +
     
    /************************************************************************* -This function unserializes data structure from string. -*************************************************************************/ -
    void rbfunserialize(std::string &s_in, rbfmodel &obj); -
    - -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    +This function returns I-th row of the sparse matrix IN COMPRESSED FORMAT -
    +only non-zero elements are returned (with their indexes). Matrix  must  be
    +stored in CRS or SKS format.
     
    +INPUT PARAMETERS:
+    S           -   sparse M*N matrix in CRS or SKS format
    +    I           -   row index, 0<=I<M
    +    ColIdx      -   output buffer for column indexes, can be preallocated.
    +                    In case buffer size is too small to store I-th row, it
    +                    is automatically reallocated.
    +    Vals        -   output buffer for values, can be preallocated. In case
    +                    buffer size is too small to  store  I-th  row,  it  is
    +                    automatically reallocated.
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // This example shows how to solve least squares problems with RBF-ML algorithm.
    -    // Below we assume that you already know basic concepts shown in the RBF_D_QNN and
    -    // RBF_D_ML_SIMPLE examples.
    -    //
    -    rbfmodel model;
    -    rbfreport rep;
    -    double v;
    +OUTPUT PARAMETERS:
+    ColIdx      -   column indexes of non-zero  elements,  in  ascending
+                    order. Symbolically non-zero elements  are  counted,
+                    i.e. an element for which storage was allocated  but
+                    which has a zero numerical value is still counted.
+    Vals        -   values. Vals[K] stores the value of the matrix
+                    element with indexes (I,ColIdx[K]). Symbolically
+                    non-zero elements are counted here as well.
    +    NZCnt       -   number of symbolically non-zero elements per row.
     
    -    //
    -    // We have 2-dimensional space and very simple fitting problem - all points
    -    // except for two are well separated and located at straight line. Two
    -    // "exceptional" points are very close, with distance between them as small
    -    // as 0.01. RBF-QNN algorithm will have many difficulties with such distribution
    -    // of points:
    -    //     X        Y
    -    //     -2       1
    -    //     -1       0
    -    //     -0.005   1
    -    //     +0.005   2
    -    //     +1      -1
    -    //     +2       1
    -    // How will RBF-ML handle such problem?
    -    //
    -    rbfcreate(2, 1, model);
    -    real_2d_array xy0 = "[[-2,0,1],[-1,0,0],[-0.005,0,1],[+0.005,0,2],[+1,0,-1],[+2,0,1]]";
    -    rbfsetpoints(model, xy0);
+NOTE: when an incorrect I (outside of [0,M-1]) or a non-CRS/SKS matrix is
+      passed, this function throws an exception.
     
    -    // First, we try to use R=5.0 with single layer (NLayers=1) and moderate amount
    -    // of regularization. Well, we already expected that results will be bad:
    -    //     Model(x=-2,y=0)=0.8407    (instead of 1.0)
    -    //     Model(x=0.005,y=0)=0.6584 (instead of 2.0)
    -    // We need more layers to show better results.
    -    rbfsetalgomultilayer(model, 5.0, 1, 1.0e-3);
    -    rbfbuildmodel(model, rep);
    -    v = rbfcalc2(model, -2.0, 0.0);
    -    printf("%.2f\n", double(v)); // EXPECTED: 0.8407981659
    -    v = rbfcalc2(model, 0.005, 0.0);
    -    printf("%.2f\n", double(v)); // EXPECTED: 0.6584267649
    -
    -    // With 4 layers we got better result at x=-2 (point which is well separated
    -    // from its neighbors). Model is now many times closer to the original data
    -    //     Model(x=-2,y=0)=0.9992    (instead of 1.0)
    -    //     Model(x=0.005,y=0)=1.5534 (instead of 2.0)
    -    // We may see that at x=0.005 result is a bit closer to 2.0, but does not
    -    // reproduce function value precisely because of close neighbor located at
    -    // at x=-0.005. Let's add two layers...
    -    rbfsetalgomultilayer(model, 5.0, 4, 1.0e-3);
    -    rbfbuildmodel(model, rep);
    -    v = rbfcalc2(model, -2.0, 0.0);
    -    printf("%.2f\n", double(v)); // EXPECTED: 0.9992673278
    -    v = rbfcalc2(model, 0.005, 0.0);
    -    printf("%.2f\n", double(v)); // EXPECTED: 1.5534666012
    -
    -    // With 6 layers we got almost perfect fit:
    -    //     Model(x=-2,y=0)=1.000    (perfect fit)
    -    //     Model(x=0.005,y=0)=1.996 (instead of 2.0)
    -    // Of course, we can reduce error at x=0.005 down to zero by adding more
    -    // layers. But do we really need it?
    -    rbfsetalgomultilayer(model, 5.0, 6, 1.0e-3);
    -    rbfbuildmodel(model, rep);
    -    v = rbfcalc2(model, -2.0, 0.0);
    -    printf("%.2f\n", double(v)); // EXPECTED: 1.0000000000
    -    v = rbfcalc2(model, 0.005, 0.0);
    -    printf("%.2f\n", double(v)); // EXPECTED: 1.9965775952
    -
    -    // Do we really need zero error? We have f(+0.005)=2 and f(-0.005)=1.
    -    // Two points are very close, and in real life situations it often means
    -    // that difference in function values can be explained by noise in the
    -    // data. Thus, true value of the underlying function should be close to
    -    // 1.5 (halfway between 1.0 and 2.0).
    -    //
    -    // How can we get such result with RBF-ML? Well, we can:
    -    // a) reduce number of layers (make model less flexible)
    -    // b) increase regularization coefficient (another way of reducing flexibility)
    -    //
    -    // Having NLayers=5 and LambdaV=0.1 gives us good least squares fit to the data:
    -    //     Model(x=-2,y=0)=1.000
    -    //     Model(x=-0.005,y=0)=1.504
    -    //     Model(x=+0.005,y=0)=1.496
    -    rbfsetalgomultilayer(model, 5.0, 5, 1.0e-1);
    -    rbfbuildmodel(model, rep);
    -    v = rbfcalc2(model, -2.0, 0.0);
    -    printf("%.2f\n", double(v)); // EXPECTED: 1.0000001620
    -    v = rbfcalc2(model, -0.005, 0.0);
    -    printf("%.2f\n", double(v)); // EXPECTED: 1.5042954378
    -    v = rbfcalc2(model, 0.005, 0.0);
    -    printf("%.2f\n", double(v)); // EXPECTED: 1.4957042013
    -    return 0;
    -}
+NOTE: this function may allocate more space for the ColIdx and Vals arrays
+      than is strictly necessary. This is done for performance reasons: on
+      SKS matrices it is faster to allocate slightly oversized buffers  up
+      front than to make two passes over the matrix (one to calculate  the
+      exact space required, another to store the data itself).
     
    +  -- ALGLIB PROJECT --
    +     Copyright 10.12.2014 by Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::sparsegetcompressedrow( + sparsematrix s, + ae_int_t i, + integer_1d_array& colidx, + real_1d_array& vals, + ae_int_t& nzcnt, + const xparams _params = alglib::xdefault); -
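A sketch of reading one row in compressed form with the function above (illustrative; linalg.h assumed). The output buffers may start empty and are grown on demand:

#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    // 2x3 CRS matrix: row 0 = [1,0,2], row 1 = [0,3,0]
    integer_1d_array ner = "[2,1]";
    sparsematrix s;
    sparsecreatecrs(2, 3, ner, s);
    sparseset(s, 0, 0, 1.0);
    sparseset(s, 0, 2, 2.0);
    sparseset(s, 1, 1, 3.0);

    integer_1d_array colidx;
    real_1d_array vals;
    ae_int_t nzcnt;
    sparsegetcompressedrow(s, 0, colidx, vals, nzcnt);
    for(int k=0; k<nzcnt; k++)
        printf("col=%d val=%.1f\n", int(colidx[k]), vals[k]);   // prints (0,1.0) and (2,2.0)
    return 0;
}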
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +
    /************************************************************************* +This function returns I-th diagonal element of the sparse matrix. -using namespace alglib; +Matrix can be in any mode (Hash-Table or CRS storage), but this function +is most efficient for CRS matrices - it requires less than 50 CPU cycles +to extract diagonal element. For Hash-Table matrices we still have O(1) +query time, but function is many times slower. +INPUT PARAMETERS + S - sparse M*N matrix in Hash-Table representation. + Exception will be thrown for CRS matrix. + I - index of the element to modify, 0<=I<min(M,N) -int main(int argc, char **argv) -{ - // - // This example shows how to build models with RBF-ML algorithm. Below - // we assume that you already know basic concepts shown in the example - // on RBF-QNN algorithm. - // - // RBF-ML is a multilayer RBF algorithm, which fits a sequence of models - // with decreasing radii. Each model is fitted with fixed number of - // iterations of linear solver. First layers give only inexact approximation - // of the target function, because RBF problems with large radii are - // ill-conditioned. However, as we add more and more layers with smaller - // and smaller radii, we get better conditioned systems - and more precise models. - // - rbfmodel model; - rbfreport rep; - double v; +RESULT + value of S[I,I] or zero (in case no element with such index is found) - // - // We have 2-dimensional space and very simple interpolation problem - all - // points are distinct and located at straight line. We want to solve it - // with RBF-ML algorithm. This problem is very simple, and RBF-QNN will - // solve it too, but we want to evaluate RBF-ML and to start from the simple - // problem. - // X Y - // -2 1 - // -1 0 - // 0 1 - // +1 -1 - // +2 1 - // - rbfcreate(2, 1, model); - real_2d_array xy0 = "[[-2,0,1],[-1,0,0],[0,0,1],[+1,0,-1],[+2,0,1]]"; - rbfsetpoints(model, xy0); + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey +*************************************************************************/ +
    double alglib::sparsegetdiagonal( + sparsematrix s, + ae_int_t i, + const xparams _params = alglib::xdefault); - // First, we try to use R=5.0 with single layer (NLayers=1) and moderate amount - // of regularization.... but results are disappointing: Model(x=0,y=0)=-0.02, - // and we need 1.0 at (x,y)=(0,0). Why? - // - // Because first layer gives very smooth and imprecise approximation of the - // function. Average distance between points is 1.0, and R=5.0 is too large - // to give us flexible model. It can give smoothness, but can't give precision. - // So we need more layers with smaller radii. - rbfsetalgomultilayer(model, 5.0, 1, 1.0e-3); - rbfbuildmodel(model, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 - v = rbfcalc2(model, 0.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: -0.021690 +
    + +
    +
    /************************************************************************* +The function returns number of strictly lower triangular non-zero elements +in the matrix. It counts SYMBOLICALLY non-zero elements, i.e. entries +in the sparse matrix data structure. If some element has zero numerical +value, it is still counted. - // Now we know that single layer is not enough. We still want to start with - // R=5.0 because it has good smoothness properties, but we will add more layers, - // each with R[i+1]=R[i]/2. We think that 4 layers is enough, because last layer - // will have R = 5.0/2^3 = 5/8 ~ 0.63, which is smaller than the average distance - // between points. And it works! - rbfsetalgomultilayer(model, 5.0, 4, 1.0e-3); - rbfbuildmodel(model, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 - v = rbfcalc2(model, 0.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: 1.000000 +This function has different cost for different types of matrices: +* for hash-based matrices it involves complete pass over entire hash-table + with O(NNZ) cost, where NNZ is number of non-zero elements +* for CRS and SKS matrix types cost of counting is O(N) (N - matrix size). - // BTW, if you look at v, you will see that it is equal to 0.9999999997, not to 1. - // This small error can be fixed by adding one more layer. - return 0; -} +RESULT: number of non-zero elements strictly below main diagonal + -- ALGLIB PROJECT -- + Copyright 12.02.2014 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::sparsegetlowercount( + sparsematrix s, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    +
    /************************************************************************* +This function returns type of the matrix storage format. +INPUT PARAMETERS: + S - sparse matrix. -int main(int argc, char **argv) -{ - // - // This example show how to work with polynomial term - // - // Suppose that we have set of 2-dimensional points with associated - // scalar function values, and we want to build a RBF model using - // our data. - // - double v; - rbfmodel model; - real_2d_array xy = "[[-1,0,2],[+1,0,3]]"; - rbfreport rep; +RESULT: + sparse storage format used by matrix: + 0 - Hash-table + 1 - CRS (compressed row storage) + 2 - SKS (skyline) - rbfcreate(2, 1, model); - rbfsetpoints(model, xy); - rbfsetalgoqnn(model); +NOTE: future versions of ALGLIB may include additional sparse storage + formats. - // - // By default, RBF model uses linear term. It means that model - // looks like - // f(x,y) = SUM(RBF[i]) + a*x + b*y + c - // where RBF[i] is I-th radial basis function and a*x+by+c is a - // linear term. Having linear terms in a model gives us: - // (1) improved extrapolation properties - // (2) linearity of the model when data can be perfectly fitted - // by the linear function - // (3) linear asymptotic behavior - // - // Our simple dataset can be modelled by the linear function - // f(x,y) = 0.5*x + 2.5 - // and rbfbuildmodel() with default settings should preserve this - // linearity. - // - ae_int_t nx; - ae_int_t ny; - ae_int_t nc; - real_2d_array xwr; - real_2d_array c; - rbfbuildmodel(model, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 - rbfunpack(model, nx, ny, xwr, nc, c); - printf("%s\n", c.tostring(2).c_str()); // EXPECTED: [[0.500,0.000,2.500]] - // asymptotic behavior of our function is linear - v = rbfcalc2(model, 1000.0, 0.0); - printf("%.1f\n", double(v)); // EXPECTED: 502.50 + -- ALGLIB PROJECT -- + Copyright 20.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::sparsegetmatrixtype( + sparsematrix s, + const xparams _params = alglib::xdefault); - // - // Instead of linear term we can use constant term. In this case - // we will get model which has form - // f(x,y) = SUM(RBF[i]) + c - // where RBF[i] is I-th radial basis function and c is a constant, - // which is equal to the average function value on the dataset. - // - // Because we've already attached dataset to the model the only - // thing we have to do is to call rbfsetconstterm() and then - // rebuild model with rbfbuildmodel(). - // - rbfsetconstterm(model); - rbfbuildmodel(model, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 - rbfunpack(model, nx, ny, xwr, nc, c); - printf("%s\n", c.tostring(2).c_str()); // EXPECTED: [[0.000,0.000,2.500]] +
    + +
    +
    /************************************************************************* +The function returns number of columns of a sparse matrix. - // asymptotic behavior of our function is constant - v = rbfcalc2(model, 1000.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: 2.500 +RESULT: number of columns of a sparse matrix. - // - // Finally, we can use zero term. Just plain RBF without polynomial - // part: - // f(x,y) = SUM(RBF[i]) - // where RBF[i] is I-th radial basis function. - // - rbfsetzeroterm(model); - rbfbuildmodel(model, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 - rbfunpack(model, nx, ny, xwr, nc, c); - printf("%s\n", c.tostring(2).c_str()); // EXPECTED: [[0.000,0.000,0.000]] + -- ALGLIB PROJECT -- + Copyright 23.08.2012 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::sparsegetncols( + sparsematrix s, + const xparams _params = alglib::xdefault); - // asymptotic behavior of our function is just zero constant - v = rbfcalc2(model, 1000.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: 0.000 - return 0; -} +
    + +
    +
    /************************************************************************* +The function returns number of rows of a sparse matrix. + +RESULT: number of rows of a sparse matrix. + -- ALGLIB PROJECT -- + Copyright 23.08.2012 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::sparsegetnrows( + sparsematrix s, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +
    /************************************************************************* +This function returns I-th row of the sparse matrix. Matrix must be stored +in CRS or SKS format. -using namespace alglib; +INPUT PARAMETERS: + S - sparse M*N matrix in CRS format + I - row index, 0<=I<M + IRow - output buffer, can be preallocated. In case buffer + size is too small to store I-th row, it is + automatically reallocated. +OUTPUT PARAMETERS: + IRow - array[M], I-th row. -int main(int argc, char **argv) -{ - // - // This example illustrates basic concepts of the RBF models: creation, modification, - // evaluation. - // - // Suppose that we have set of 2-dimensional points with associated - // scalar function values, and we want to build a RBF model using - // our data. - // - // NOTE: we can work with 3D models too :) - // - // Typical sequence of steps is given below: - // 1. we create RBF model object - // 2. we attach our dataset to the RBF model and tune algorithm settings - // 3. we rebuild RBF model using QNN algorithm on new data - // 4. we use RBF model (evaluate, serialize, etc.) - // - double v; +NOTE: this function has O(N) running time, where N is a column count. It + allocates and fills N-element array, even although most of its + elemets are zero. - // - // Step 1: RBF model creation. - // - // We have to specify dimensionality of the space (2 or 3) and - // dimensionality of the function (scalar or vector). - // - rbfmodel model; - rbfcreate(2, 1, model); +NOTE: If you have O(non-zeros-per-row) time and memory requirements, use + SparseGetCompressedRow() function. It returns data in compressed + format. - // New model is empty - it can be evaluated, - // but we just get zero value at any point. - v = rbfcalc2(model, 0.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: 0.000 +NOTE: when incorrect I (outside of [0,M-1]) or matrix (non CRS/SKS) + is passed, this function throws exception. - // - // Step 2: we add dataset. - // - // XY arrays containt two points - x0=(-1,0) and x1=(+1,0) - - // and two function values f(x0)=2, f(x1)=3. - // - real_2d_array xy = "[[-1,0,2],[+1,0,3]]"; - rbfsetpoints(model, xy); + -- ALGLIB PROJECT -- + Copyright 10.12.2014 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::sparsegetrow( + sparsematrix s, + ae_int_t i, + real_1d_array& irow, + const xparams _params = alglib::xdefault); - // We added points, but model was not rebuild yet. - // If we call rbfcalc2(), we still will get 0.0 as result. - v = rbfcalc2(model, 0.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: 0.000 +
    + +
    +
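A minimal sketch of sparsegetrow(), following the signature documented above; the 2x3 sample matrix and the printed values are illustrative assumptions:

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // 2x3 CRS matrix with two non-zeros in row 0 and one in row 1:
    //   [ 2 0 1 ]
    //   [ 0 3 0 ]
    sparsematrix s;
    integer_1d_array row_sizes = "[2,1]";
    sparsecreatecrs(2, 3, row_sizes, s);
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 0, 2, 1.0);
    sparseset(s, 1, 1, 3.0);

    // extract row 0 as a dense vector; zero entries are filled in explicitly
    real_1d_array row;
    sparsegetrow(s, 0, row);
    printf("%s\n", row.tostring(2).c_str()); // prints the dense row [2, 0, 1]
    return 0;
}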
    /************************************************************************* +The function returns number of strictly upper triangular non-zero elements +in the matrix. It counts SYMBOLICALLY non-zero elements, i.e. entries +in the sparse matrix data structure. If some element has zero numerical +value, it is still counted. - // - // Step 3: rebuild model - // - // After we've configured model, we should rebuild it - - // it will change coefficients stored internally in the - // rbfmodel structure. - // - // By default, RBF uses QNN algorithm, which works well with - // relatively uniform datasets (all points are well separated, - // average distance is approximately same for all points). - // This default algorithm is perfectly suited for our simple - // made up data. - // - // NOTE: we recommend you to take a look at example of RBF-ML, - // multilayer RBF algorithm, which sometimes is a better - // option than QNN. - // - rbfreport rep; - rbfsetalgoqnn(model); - rbfbuildmodel(model, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 +This function has different cost for different types of matrices: +* for hash-based matrices it involves complete pass over entire hash-table + with O(NNZ) cost, where NNZ is number of non-zero elements +* for CRS and SKS matrix types cost of counting is O(N) (N - matrix size). - // - // Step 4: model was built - // - // After call of rbfbuildmodel(), rbfcalc2() will return - // value of the new model. - // - v = rbfcalc2(model, 0.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: 2.500 - return 0; -} +RESULT: number of non-zero elements strictly above main diagonal + -- ALGLIB PROJECT -- + Copyright 12.02.2014 by Bochkanov Sergey +*************************************************************************/ +
    ae_int_t alglib::sparsegetuppercount( + sparsematrix s, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +
    /************************************************************************* +This function checks matrix storage format and returns True when matrix is +stored using CRS representation. -using namespace alglib; +INPUT PARAMETERS: + S - sparse matrix. +RESULT: + True if matrix type is CRS + False if matrix type is not CRS -int main(int argc, char **argv) -{ - // - // This example show how to serialize and unserialize RBF model - // - // Suppose that we have set of 2-dimensional points with associated - // scalar function values, and we want to build a RBF model using - // our data. Then we want to serialize it to string and to unserialize - // from string, loading to another instance of RBF model. - // - // Here we assume that you already know how to create RBF models. - // - std::string s; - double v; - rbfmodel model0; - rbfmodel model1; - real_2d_array xy = "[[-1,0,2],[+1,0,3]]"; - rbfreport rep; + -- ALGLIB PROJECT -- + Copyright 20.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    bool alglib::sparseiscrs( + sparsematrix s, + const xparams _params = alglib::xdefault); - // model initialization - rbfcreate(2, 1, model0); - rbfsetpoints(model0, xy); - rbfsetalgoqnn(model0); - rbfbuildmodel(model0, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 +
    + +
    +
    /************************************************************************* +This function checks matrix storage format and returns True when matrix is +stored using Hash table representation. - // - // Serialization - it looks easy, - // but you should carefully read next section. - // - alglib::rbfserialize(model0, s); - alglib::rbfunserialize(s, model1); +INPUT PARAMETERS: + S - sparse matrix. - // both models return same value - v = rbfcalc2(model0, 0.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: 2.500 - v = rbfcalc2(model1, 0.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: 2.500 +RESULT: + True if matrix type is Hash table + False if matrix type is not Hash table - // - // Previous section shows that model state is saved/restored during - // serialization. However, some properties are NOT serialized. - // - // Serialization saves/restores RBF model, but it does NOT saves/restores - // settings which were used to build current model. In particular, dataset - // which were used to build model, is not preserved. - // - // What does it mean in for us? - // - // Do you remember this sequence: rbfcreate-rbfsetpoints-rbfbuildmodel? - // First step creates model, second step adds dataset and tunes model - // settings, third step builds model using current dataset and model - // construction settings. - // - // If you call rbfbuildmodel() without calling rbfsetpoints() first, you - // will get empty (zero) RBF model. In our example, model0 contains - // dataset which was added by rbfsetpoints() call. However, model1 does - // NOT contain dataset - because dataset is NOT serialized. - // - // This, if we call rbfbuildmodel(model0,rep), we will get same model, - // which returns 2.5 at (x,y)=(0,0). However, after same call model1 will - // return zero - because it contains RBF model (coefficients), but does NOT - // contain dataset which was used to build this model. - // - // Basically, it means that: - // * serialization of the RBF model preserves anything related to the model - // EVALUATION - // * but it does NOT creates perfect copy of the original object. - // - rbfbuildmodel(model0, rep); - v = rbfcalc2(model0, 0.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: 2.500 + -- ALGLIB PROJECT -- + Copyright 20.07.2012 by Bochkanov Sergey +*************************************************************************/ +
    bool alglib::sparseishash( + sparsematrix s, + const xparams _params = alglib::xdefault); - rbfbuildmodel(model1, rep); - v = rbfcalc2(model1, 0.0, 0.0); - printf("%.2f\n", double(v)); // EXPECTED: 0.000 - return 0; -} +
    + +
    +
    /************************************************************************* +This function checks matrix storage format and returns True when matrix is +stored using SKS representation. +INPUT PARAMETERS: + S - sparse matrix. -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +RESULT:
    +    True if matrix type is SKS
    +    False if matrix type is not SKS
     
    -using namespace alglib;
    +  -- ALGLIB PROJECT --
    +     Copyright 20.07.2012 by Bochkanov Sergey
    +*************************************************************************/
    +
    bool alglib::sparseissks( + sparsematrix s, + const xparams _params = alglib::xdefault); +
    + +
    +
    /************************************************************************* +This function calculates matrix-matrix product S*A. Matrix S must be +stored in CRS or SKS format (exception will be thrown otherwise). -int main(int argc, char **argv) -{ - // - // Suppose that we have set of 2-dimensional points with associated VECTOR - // function values, and we want to build a RBF model using our data. - // - // Typical sequence of steps is given below: - // 1. we create RBF model object - // 2. we attach our dataset to the RBF model and tune algorithm settings - // 3. we rebuild RBF model using new data - // 4. we use RBF model (evaluate, serialize, etc.) - // - real_1d_array x; - real_1d_array y; +INPUT PARAMETERS + S - sparse M*N matrix in CRS or SKS format. + A - array[N][K], input dense matrix. For performance reasons + we make only quick checks - we check that array size + is at least N, but we do not check for NAN's or INF's. + K - number of columns of matrix (A). + B - output buffer, possibly preallocated. In case buffer + size is too small to store result, this buffer is + automatically resized. - // - // Step 1: RBF model creation. - // - // We have to specify dimensionality of the space (equal to 2) and - // dimensionality of the function (2-dimensional vector function). - // - rbfmodel model; - rbfcreate(2, 2, model); +OUTPUT PARAMETERS + B - array[M][K], S*A - // New model is empty - it can be evaluated, - // but we just get zero value at any point. - x = "[+1,+1]"; - rbfcalc(model, x, y); - printf("%s\n", y.tostring(2).c_str()); // EXPECTED: [0.000,0.000] +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. - // - // Step 2: we add dataset. - // - // XY arrays containt four points: - // * (x0,y0) = (+1,+1), f(x0,y0)=(0,-1) - // * (x1,y1) = (+1,-1), f(x1,y1)=(-1,0) - // * (x2,y2) = (-1,-1), f(x2,y2)=(0,+1) - // * (x3,y3) = (-1,+1), f(x3,y3)=(+1,0) - // - // By default, RBF uses QNN algorithm, which works well with - // relatively uniform datasets (all points are well separated, - // average distance is approximately same for all points). - // - // This default algorithm is perfectly suited for our simple - // made up data. - // - real_2d_array xy = "[[+1,+1,0,-1],[+1,-1,-1,0],[-1,-1,0,+1],[-1,+1,+1,0]]"; - rbfsetpoints(model, xy); + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::sparsemm( + sparsematrix s, + real_2d_array a, + ae_int_t k, + real_2d_array& b, + const xparams _params = alglib::xdefault); - // We added points, but model was not rebuild yet. - // If we call rbfcalc(), we still will get 0.0 as result. - rbfcalc(model, x, y); - printf("%s\n", y.tostring(2).c_str()); // EXPECTED: [0.000,0.000] +
    + +
    +
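A minimal sketch of sparsemm(), based on the signature above; the 2x2 sparse matrix and the dense identity used here are illustrative assumptions:

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // S = [ 2 1 ] in CRS format
    //     [ 0 1 ]
    sparsematrix s;
    integer_1d_array row_sizes = "[2,1]";
    sparsecreatecrs(2, 2, row_sizes, s);
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 0, 1, 1.0);
    sparseset(s, 1, 1, 1.0);

    // multiplying by the 2x2 identity should reproduce S as a dense matrix
    real_2d_array a = "[[1,0],[0,1]]";
    real_2d_array b;
    sparsemm(s, a, 2, b);                  // B := S*A, B is resized automatically
    printf("%s\n", b.tostring(2).c_str()); // prints [[2,1],[0,1]] as a dense array
    return 0;
}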
    /************************************************************************* +This function simultaneously calculates two matrix-matrix products: + S*A and S^T*A. +S must be square (non-rectangular) matrix stored in CRS or SKS format +(exception will be thrown otherwise). - // - // Step 3: rebuild model - // - // After we've configured model, we should rebuild it - - // it will change coefficients stored internally in the - // rbfmodel structure. - // - rbfreport rep; - rbfbuildmodel(model, rep); - printf("%d\n", int(rep.terminationtype)); // EXPECTED: 1 +INPUT PARAMETERS + S - sparse N*N matrix in CRS or SKS format. + A - array[N][K], input dense matrix. For performance reasons + we make only quick checks - we check that array size is + at least N, but we do not check for NAN's or INF's. + K - number of columns of matrix (A). + B0 - output buffer, possibly preallocated. In case buffer + size is too small to store result, this buffer is + automatically resized. + B1 - output buffer, possibly preallocated. In case buffer + size is too small to store result, this buffer is + automatically resized. - // - // Step 4: model was built - // - // After call of rbfbuildmodel(), rbfcalc() will return - // value of the new model. - // - rbfcalc(model, x, y); - printf("%s\n", y.tostring(2).c_str()); // EXPECTED: [0.000,-1.000] - return 0; -} +OUTPUT PARAMETERS + B0 - array[N][K], S*A + B1 - array[N][K], S^T*A + +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::sparsemm2( + sparsematrix s, + real_2d_array a, + ae_int_t k, + real_2d_array& b0, + real_2d_array& b1, + const xparams _params = alglib::xdefault); -
    - - + +
     
    /************************************************************************* -Estimate of the condition number of a matrix given by its LU decomposition (1-norm) +This function calculates matrix-matrix product S^T*A. Matrix S must be +stored in CRS or SKS format (exception will be thrown otherwise). -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +INPUT PARAMETERS + S - sparse M*N matrix in CRS or SKS format. + A - array[M][K], input dense matrix. For performance reasons + we make only quick checks - we check that array size is + at least M, but we do not check for NAN's or INF's. + K - number of columns of matrix (A). + B - output buffer, possibly preallocated. In case buffer + size is too small to store result, this buffer is + automatically resized. -Input parameters: - LUA - LU decomposition of a matrix in compact form. Output of - the CMatrixLU subroutine. - N - size of matrix A. +OUTPUT PARAMETERS + B - array[N][K], S^T*A -Result: 1/LowerBound(cond(A)) +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::cmatrixlurcond1(complex_2d_array lua, ae_int_t n); +
    void alglib::sparsemtm( + sparsematrix s, + real_2d_array a, + ae_int_t k, + real_2d_array& b, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Estimate of the condition number of a matrix given by its LU decomposition -(infinity norm). +This function calculates matrix-vector product S^T*x. Matrix S must be +stored in CRS or SKS format (exception will be thrown otherwise). -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +INPUT PARAMETERS + S - sparse M*N matrix in CRS or SKS format. + X - array[M], input vector. For performance reasons we + make only quick checks - we check that array size is + at least M, but we do not check for NAN's or INF's. + Y - output buffer, possibly preallocated. In case buffer + size is too small to store result, this buffer is + automatically resized. -Input parameters: - LUA - LU decomposition of a matrix in compact form. Output of - the CMatrixLU subroutine. - N - size of matrix A. +OUTPUT PARAMETERS + Y - array[N], S^T*x -Result: 1/LowerBound(cond(A)) +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::cmatrixlurcondinf(complex_2d_array lua, ae_int_t n); +
    void alglib::sparsemtv( + sparsematrix s, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Estimate of a matrix condition number (1-norm) +This function calculates matrix-vector product S*x. Matrix S must be +stored in CRS or SKS format (exception will be thrown otherwise). -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +INPUT PARAMETERS + S - sparse M*N matrix in CRS or SKS format. + X - array[N], input vector. For performance reasons we + make only quick checks - we check that array size is + at least N, but we do not check for NAN's or INF's. + Y - output buffer, possibly preallocated. In case buffer + size is too small to store result, this buffer is + automatically resized. -Input parameters: - A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. +OUTPUT PARAMETERS + Y - array[M], S*x -Result: 1/LowerBound(cond(A)) +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::cmatrixrcond1(complex_2d_array a, ae_int_t n); +
    void alglib::sparsemv( + sparsematrix s, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Estimate of a matrix condition number (infinity-norm). +This function simultaneously calculates two matrix-vector products: + S*x and S^T*x. +S must be square (non-rectangular) matrix stored in CRS or SKS format +(exception will be thrown otherwise). -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +INPUT PARAMETERS + S - sparse N*N matrix in CRS or SKS format. + X - array[N], input vector. For performance reasons we + make only quick checks - we check that array size is + at least N, but we do not check for NAN's or INF's. + Y0 - output buffer, possibly preallocated. In case buffer + size is too small to store result, this buffer is + automatically resized. + Y1 - output buffer, possibly preallocated. In case buffer + size is too small to store result, this buffer is + automatically resized. -Input parameters: - A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. +OUTPUT PARAMETERS + Y0 - array[N], S*x + Y1 - array[N], S^T*x -Result: 1/LowerBound(cond(A)) +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::cmatrixrcondinf(complex_2d_array a, ae_int_t n); +
    void alglib::sparsemv2( + sparsematrix s, + real_1d_array x, + real_1d_array& y0, + real_1d_array& y1, + const xparams _params = alglib::xdefault);
    - +
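A short sketch of sparsemv2(), which computes S*x and S^T*x in a single pass; the calling form follows the signature above, while the square matrix and vector are illustrative assumptions:

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // square CRS matrix S = [ 2 1 ; 0 1 ]
    sparsematrix s;
    integer_1d_array row_sizes = "[2,1]";
    sparsecreatecrs(2, 2, row_sizes, s);
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 0, 1, 1.0);
    sparseset(s, 1, 1, 1.0);

    // one pass over S yields both products
    real_1d_array x = "[1,1]";
    real_1d_array y0 = "[]";
    real_1d_array y1 = "[]";
    sparsemv2(s, x, y0, y1);
    printf("%s\n", y0.tostring(2).c_str()); // S*x   = [3,1]
    printf("%s\n", y1.tostring(2).c_str()); // S^T*x = [2,2]
    return 0;
}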
     
    /************************************************************************* -Triangular matrix: estimate of a condition number (1-norm) - -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). - -Input parameters: - A - matrix. Array[0..N-1, 0..N-1]. - N - size of A. - IsUpper - True, if the matrix is upper triangular. - IsUnit - True, if the matrix has a unit diagonal. - -Result: 1/LowerBound(cond(A)) +This procedure resizes Hash-Table matrix. It can be called when you have +deleted too many elements from the matrix, and you want to free unneeded +memory. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::cmatrixtrrcond1( - complex_2d_array a, - ae_int_t n, - bool isupper, - bool isunit); +
    void alglib::sparseresizematrix( + sparsematrix s, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Triangular matrix: estimate of a matrix condition number (infinity-norm). +This function rewrites existing (non-zero) element. It returns True if +element exists or False, when it is called for non-existing (zero) +element. -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +This function works with any kind of the matrix. -Input parameters: - A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - IsUpper - True, if the matrix is upper triangular. - IsUnit - True, if the matrix has a unit diagonal. +The purpose of this function is to provide convenient thread-safe way to +modify sparse matrix. Such modification (already existing element is +rewritten) is guaranteed to be thread-safe without any synchronization, as +long as different threads modify different elements. -Result: 1/LowerBound(cond(A)) +INPUT PARAMETERS + S - sparse M*N matrix in any kind of representation + (Hash, SKS, CRS). + I - row index of non-zero element to modify, 0<=I<M + J - column index of non-zero element to modify, 0<=J<N + V - value to rewrite, must be finite number -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. +OUTPUT PARAMETERS + S - modified matrix +RESULT + True in case when element exists + False in case when element doesn't exist or it is zero + + -- ALGLIB PROJECT -- + Copyright 14.03.2012 by Bochkanov Sergey *************************************************************************/ -
    double alglib::cmatrixtrrcondinf( - complex_2d_array a, - ae_int_t n, - bool isupper, - bool isunit); +
    bool alglib::sparserewriteexisting( + sparsematrix s, + ae_int_t i, + ae_int_t j, + double v, + const xparams _params = alglib::xdefault);
    - +
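A small sketch of sparserewriteexisting(); the call follows the signature above, and the tiny Hash-Table matrix is an illustrative assumption:

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    sparsematrix s;
    sparsecreate(2, 2, s);   // Hash-Table matrix
    sparseset(s, 0, 0, 2.0); // only S[0,0] is present in the table

    // rewriting an existing element succeeds, a non-existent one is left untouched
    bool ok;
    ok = sparserewriteexisting(s, 0, 0, 5.0);
    printf("%s\n", ok ? "true" : "false"); // true  - S[0,0] is now 5.0
    ok = sparserewriteexisting(s, 1, 1, 5.0);
    printf("%s\n", ok ? "true" : "false"); // false - S[1,1] was never created
    return 0;
}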
     
    /************************************************************************* -Condition number estimate of a Hermitian positive definite matrix given by -Cholesky decomposition. +This function modifies S[i,j] - element of the sparse matrix. -The algorithm calculates a lower bound of the condition number. In this -case, the algorithm does not return a lower bound of the condition number, -but an inverse number (to avoid an overflow in case of a singular matrix). +For Hash-based storage format: +* this function can be called at any moment - during matrix initialization + or later +* new value can be zero or non-zero. In case new value of S[i,j] is zero, + this element is deleted from the table. +* this function has no effect when called with zero V for non-existent + element. -It should be noted that 1-norm and inf-norm condition numbers of symmetric -matrices are equal, so the algorithm doesn't take into account the -differences between these types of norms. +For CRS-bases storage format: +* this function can be called ONLY DURING MATRIX INITIALIZATION +* zero values are stored in the matrix similarly to non-zero ones +* elements must be initialized in correct order - from top row to bottom, + within row - from left to right. -Input parameters: - CD - Cholesky decomposition of matrix A, - output of SMatrixCholesky subroutine. - N - size of matrix A. +For SKS storage: +* this function can be called at any moment - during matrix initialization + or later +* zero values are stored in the matrix similarly to non-zero ones +* this function CAN NOT be called for non-existent (outside of the band + specified during SKS matrix creation) elements. Say, if you created SKS + matrix with bandwidth=2 and tried to call sparseset(s,0,10,VAL), an + exception will be generated. -Result: 1/LowerBound(cond(A)) +INPUT PARAMETERS + S - sparse M*N matrix in Hash-Table, SKS or CRS format. + I - row index of the element to modify, 0<=I<M + J - column index of the element to modify, 0<=J<N + V - value to set, must be finite number, can be zero -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. +OUTPUT PARAMETERS + S - modified matrix + + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::hpdmatrixcholeskyrcond( - complex_2d_array a, - ae_int_t n, - bool isupper); +
    void alglib::sparseset( + sparsematrix s, + ae_int_t i, + ae_int_t j, + double v, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -Condition number estimate of a Hermitian positive definite matrix. - -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +This function calculates matrix-matrix product S*A, when S is symmetric +matrix. Matrix S must be stored in CRS or SKS format (exception will be +thrown otherwise). -It should be noted that 1-norm and inf-norm of condition numbers of symmetric -matrices are equal, so the algorithm doesn't take into account the -differences between these types of norms. +INPUT PARAMETERS + S - sparse M*M matrix in CRS or SKS format. + IsUpper - whether upper or lower triangle of S is given: + * if upper triangle is given, only S[i,j] for j>=i + are used, and lower triangle is ignored (it can be + empty - these elements are not referenced at all). + * if lower triangle is given, only S[i,j] for j<=i + are used, and upper triangle is ignored. + A - array[N][K], input dense matrix. For performance reasons + we make only quick checks - we check that array size is + at least N, but we do not check for NAN's or INF's. + K - number of columns of matrix (A). + B - output buffer, possibly preallocated. In case buffer + size is too small to store result, this buffer is + automatically resized. -Input parameters: - A - Hermitian positive definite matrix which is given by its - upper or lower triangle depending on the value of - IsUpper. Array with elements [0..N-1, 0..N-1]. - N - size of matrix A. - IsUpper - storage format. +OUTPUT PARAMETERS + B - array[M][K], S*A -Result: - 1/LowerBound(cond(A)), if matrix A is positive definite, - -1, if matrix A is not positive definite, and its condition number - could not be found by this algorithm. +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::hpdmatrixrcond( - complex_2d_array a, - ae_int_t n, - bool isupper); +
    void alglib::sparsesmm( + sparsematrix s, + bool isupper, + real_2d_array a, + ae_int_t k, + real_2d_array& b, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Estimate of the condition number of a matrix given by its LU decomposition (1-norm) +This function calculates matrix-vector product S*x, when S is symmetric +matrix. Matrix S must be stored in CRS or SKS format (exception will be +thrown otherwise). -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +INPUT PARAMETERS + S - sparse M*M matrix in CRS or SKS format. + IsUpper - whether upper or lower triangle of S is given: + * if upper triangle is given, only S[i,j] for j>=i + are used, and lower triangle is ignored (it can be + empty - these elements are not referenced at all). + * if lower triangle is given, only S[i,j] for j<=i + are used, and upper triangle is ignored. + X - array[N], input vector. For performance reasons we + make only quick checks - we check that array size is + at least N, but we do not check for NAN's or INF's. + Y - output buffer, possibly preallocated. In case buffer + size is too small to store result, this buffer is + automatically resized. -Input parameters: - LUA - LU decomposition of a matrix in compact form. Output of - the RMatrixLU subroutine. - N - size of matrix A. +OUTPUT PARAMETERS + Y - array[M], S*x -Result: 1/LowerBound(cond(A)) +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 14.10.2011 by Bochkanov Sergey *************************************************************************/ -
    double alglib::rmatrixlurcond1(real_2d_array lua, ae_int_t n); +
    void alglib::sparsesmv( + sparsematrix s, + bool isupper, + real_1d_array x, + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +
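A minimal sketch of sparsesmv() with a symmetric matrix stored as its upper triangle; the matrix and vector values are illustrative assumptions:

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // symmetric S = [ 2 1 ; 1 3 ], only the upper triangle is stored (CRS)
    sparsematrix s;
    integer_1d_array row_sizes = "[2,1]";
    sparsecreatecrs(2, 2, row_sizes, s);
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 0, 1, 1.0);
    sparseset(s, 1, 1, 3.0);

    real_1d_array x = "[1,1]";
    real_1d_array y = "[]";
    sparsesmv(s, true, x, y);              // IsUpper=true: lower triangle is ignored
    printf("%s\n", y.tostring(2).c_str()); // S*x = [3,4]
    return 0;
}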
     
    /************************************************************************* -Estimate of the condition number of a matrix given by its LU decomposition -(infinity norm). - -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). - -Input parameters: - LUA - LU decomposition of a matrix in compact form. Output of - the RMatrixLU subroutine. - N - size of matrix A. - -Result: 1/LowerBound(cond(A)) +This function efficiently swaps contents of S0 and S1. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 16.01.2014 by Bochkanov Sergey *************************************************************************/ -
    double alglib::rmatrixlurcondinf(real_2d_array lua, ae_int_t n); +
    void alglib::sparseswap( + sparsematrix s0, + sparsematrix s1, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Estimate of a matrix condition number (1-norm) +This function performs transpose of CRS matrix. -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +INPUT PARAMETERS + S - sparse matrix in CRS format. -Input parameters: - A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. +OUTPUT PARAMETERS + S - sparse matrix, transposed. -Result: 1/LowerBound(cond(A)) +NOTE: internal temporary copy is allocated for the purposes of + transposition. It is deallocated after transposition. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 30.01.2018 by Bochkanov Sergey *************************************************************************/ -
    double alglib::rmatrixrcond1(real_2d_array a, ae_int_t n); +
    void alglib::sparsetransposecrs( + sparsematrix s, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Estimate of a matrix condition number (infinity-norm). +This function performs efficient in-place transpose of SKS matrix. No +additional memory is allocated during transposition. -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +This function supports only skyline storage format (SKS). -Input parameters: - A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. +INPUT PARAMETERS + S - sparse matrix in SKS format. -Result: 1/LowerBound(cond(A)) +OUTPUT PARAMETERS + S - sparse matrix, transposed. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 16.01.2014 by Bochkanov Sergey *************************************************************************/ -
    double alglib::rmatrixrcondinf(real_2d_array a, ae_int_t n); +
    void alglib::sparsetransposesks( + sparsematrix s, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Triangular matrix: estimate of a condition number (1-norm) +This function calculates matrix-vector product op(S)*x, when x is vector, +S is symmetric triangular matrix, op(S) is transposition or no operation. +Matrix S must be stored in CRS or SKS format (exception will be thrown +otherwise). -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +INPUT PARAMETERS + S - sparse square matrix in CRS or SKS format. + IsUpper - whether upper or lower triangle of S is used: + * if upper triangle is given, only S[i,j] for j>=i + are used, and lower triangle is ignored (it can be + empty - these elements are not referenced at all). + * if lower triangle is given, only S[i,j] for j<=i + are used, and upper triangle is ignored. + IsUnit - unit or non-unit diagonal: + * if True, diagonal elements of triangular matrix are + considered equal to 1.0. Actual elements stored in + S are not referenced at all. + * if False, diagonal stored in S is used + OpType - operation type: + * if 0, S*x is calculated + * if 1, (S^T)*x is calculated (transposition) + X - array[N] which stores input vector. For performance + reasons we make only quick checks - we check that + array size is at least N, but we do not check for + NAN's or INF's. + Y - possibly preallocated input buffer. Automatically + resized if its size is too small. -Input parameters: - A - matrix. Array[0..N-1, 0..N-1]. - N - size of A. - IsUpper - True, if the matrix is upper triangular. - IsUnit - True, if the matrix has a unit diagonal. +OUTPUT PARAMETERS + Y - array[N], op(S)*x -Result: 1/LowerBound(cond(A)) +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 20.01.2014 by Bochkanov Sergey *************************************************************************/ -
    double alglib::rmatrixtrrcond1( - real_2d_array a, - ae_int_t n, +
    void alglib::sparsetrmv( + sparsematrix s, bool isupper, - bool isunit); + bool isunit, + ae_int_t optype, + real_1d_array& x, + real_1d_array& y, + const xparams _params = alglib::xdefault);
    - +
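A minimal sketch of sparsetrmv() with an upper triangular CRS matrix; the argument order follows the signature above, the values are illustrative assumptions:

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // upper triangular S = [ 2 1 ; 0 1 ] in CRS format
    sparsematrix s;
    integer_1d_array row_sizes = "[2,1]";
    sparsecreatecrs(2, 2, row_sizes, s);
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 0, 1, 1.0);
    sparseset(s, 1, 1, 1.0);

    real_1d_array x = "[1,1]";
    real_1d_array y = "[]";
    sparsetrmv(s, true, false, 0, x, y);   // OpType=0, non-unit diagonal: y := S*x
    printf("%s\n", y.tostring(2).c_str()); // S*x = [3,1]
    return 0;
}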
     
    /************************************************************************* -Triangular matrix: estimate of a matrix condition number (infinity-norm). +This function solves linear system op(S)*y=x where x is vector, S is +symmetric triangular matrix, op(S) is transposition or no operation. +Matrix S must be stored in CRS or SKS format (exception will be thrown +otherwise). -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +INPUT PARAMETERS + S - sparse square matrix in CRS or SKS format. + IsUpper - whether upper or lower triangle of S is used: + * if upper triangle is given, only S[i,j] for j>=i + are used, and lower triangle is ignored (it can be + empty - these elements are not referenced at all). + * if lower triangle is given, only S[i,j] for j<=i + are used, and upper triangle is ignored. + IsUnit - unit or non-unit diagonal: + * if True, diagonal elements of triangular matrix are + considered equal to 1.0. Actual elements stored in + S are not referenced at all. + * if False, diagonal stored in S is used. It is your + responsibility to make sure that diagonal is + non-zero. + OpType - operation type: + * if 0, S*x is calculated + * if 1, (S^T)*x is calculated (transposition) + X - array[N] which stores input vector. For performance + reasons we make only quick checks - we check that + array size is at least N, but we do not check for + NAN's or INF's. -Input parameters: - A - matrix. Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - IsUpper - True, if the matrix is upper triangular. - IsUnit - True, if the matrix has a unit diagonal. +OUTPUT PARAMETERS + X - array[N], inv(op(S))*x -Result: 1/LowerBound(cond(A)) +NOTE: this function throws exception when called for non-CRS/SKS matrix. + You must convert your matrix with SparseConvertToCRS/SKS() before + using this function. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. +NOTE: no assertion or tests are done during algorithm operation. It is + your responsibility to provide invertible matrix to algorithm. + + -- ALGLIB PROJECT -- + Copyright 20.01.2014 by Bochkanov Sergey *************************************************************************/ -
    double alglib::rmatrixtrrcondinf( - real_2d_array a, - ae_int_t n, +
    void alglib::sparsetrsv( + sparsematrix s, bool isupper, - bool isunit); + bool isunit, + ae_int_t optype, + real_1d_array& x, + const xparams _params = alglib::xdefault);
    - +
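A minimal sketch of sparsetrsv(), solving an upper triangular system in place; the matrix and right-hand side are illustrative assumptions:

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // upper triangular S = [ 2 1 ; 0 1 ] in CRS format
    sparsematrix s;
    integer_1d_array row_sizes = "[2,1]";
    sparsecreatecrs(2, 2, row_sizes, s);
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 0, 1, 1.0);
    sparseset(s, 1, 1, 1.0);

    // solve S*y = [3,1]; X is overwritten with the solution
    real_1d_array x = "[3,1]";
    sparsetrsv(s, true, false, 0, x);      // OpType=0, non-unit diagonal
    printf("%s\n", x.tostring(2).c_str()); // solution is [1,1]
    return 0;
}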
     
    /************************************************************************* -Condition number estimate of a symmetric positive definite matrix given by -Cholesky decomposition. - -The algorithm calculates a lower bound of the condition number. In this -case, the algorithm does not return a lower bound of the condition number, -but an inverse number (to avoid an overflow in case of a singular matrix). +This function calculates vector-matrix-vector product x'*S*x, where S is +symmetric matrix. Matrix S must be stored in CRS or SKS format (exception +will be thrown otherwise). -It should be noted that 1-norm and inf-norm condition numbers of symmetric -matrices are equal, so the algorithm doesn't take into account the -differences between these types of norms. +INPUT PARAMETERS + S - sparse M*M matrix in CRS or SKS format. + IsUpper - whether upper or lower triangle of S is given: + * if upper triangle is given, only S[i,j] for j>=i + are used, and lower triangle is ignored (it can be + empty - these elements are not referenced at all). + * if lower triangle is given, only S[i,j] for j<=i + are used, and upper triangle is ignored. + X - array[N], input vector. For performance reasons we + make only quick checks - we check that array size is + at least N, but we do not check for NAN's or INF's. -Input parameters: - CD - Cholesky decomposition of matrix A, - output of SMatrixCholesky subroutine. - N - size of matrix A. +RESULT + x'*S*x -Result: 1/LowerBound(cond(A)) +NOTE: this function throws exception when called for non-CRS/SKS matrix. +You must convert your matrix with SparseConvertToCRS/SKS() before using +this function. -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. + -- ALGLIB PROJECT -- + Copyright 27.01.2014 by Bochkanov Sergey *************************************************************************/ -
    double alglib::spdmatrixcholeskyrcond( - real_2d_array a, - ae_int_t n, - bool isupper); +
    double alglib::sparsevsmv( + sparsematrix s, + bool isupper, + real_1d_array x, + const xparams _params = alglib::xdefault);
    - +
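A minimal sketch of sparsevsmv(); the symmetric matrix (stored as its upper triangle) and the vector are illustrative assumptions:

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // symmetric S = [ 2 1 ; 1 3 ] stored as its upper triangle (CRS)
    sparsematrix s;
    integer_1d_array row_sizes = "[2,1]";
    sparsecreatecrs(2, 2, row_sizes, s);
    sparseset(s, 0, 0, 2.0);
    sparseset(s, 0, 1, 1.0);
    sparseset(s, 1, 1, 3.0);

    // x'*S*x = 2 + 2*1 + 3 = 7 for x = [1,1]
    real_1d_array x = "[1,1]";
    double v = sparsevsmv(s, true, x);
    printf("%.2f\n", double(v)); // 7.00
    return 0;
}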
    -
    /************************************************************************* -Condition number estimate of a symmetric positive definite matrix. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "linalg.h" -The algorithm calculates a lower bound of the condition number. In this case, -the algorithm does not return a lower bound of the condition number, but an -inverse number (to avoid an overflow in case of a singular matrix). +using namespace alglib; -It should be noted that 1-norm and inf-norm of condition numbers of symmetric -matrices are equal, so the algorithm doesn't take into account the -differences between these types of norms. -Input parameters: - A - symmetric positive definite matrix which is given by its - upper or lower triangle depending on the value of - IsUpper. Array with elements [0..N-1, 0..N-1]. - N - size of matrix A. - IsUpper - storage format. +int main(int argc, char **argv) +{ + // + // This example demonstrates creation/initialization of the sparse matrix + // and matrix-vector multiplication. + // + // First, we have to create matrix and initialize it. Matrix is initially created + // in the Hash-Table format, which allows convenient initialization. We can modify + // Hash-Table matrix with sparseset() and sparseadd() functions. + // + // NOTE: Unlike CRS format, Hash-Table representation allows you to initialize + // elements in the arbitrary order. You may see that we initialize a[0][0] first, + // then move to the second row, and then move back to the first row. + // + sparsematrix s; + sparsecreate(2, 2, s); + sparseset(s, 0, 0, 2.0); + sparseset(s, 1, 1, 1.0); + sparseset(s, 0, 1, 1.0); -Result: - 1/LowerBound(cond(A)), if matrix A is positive definite, - -1, if matrix A is not positive definite, and its condition number - could not be found by this algorithm. + sparseadd(s, 1, 1, 4.0); -NOTE: - if k(A) is very large, then matrix is assumed degenerate, k(A)=INF, - 0.0 is returned in such cases. -*************************************************************************/ -
    double alglib::spdmatrixrcond(real_2d_array a, ae_int_t n, bool isupper); + // + // Now S is equal to + // [ 2 1 ] + // [ 5 ] + // Lets check it by reading matrix contents with sparseget(). + // You may see that with sparseget() you may read both non-zero + // and zero elements. + // + double v; + v = sparseget(s, 0, 0); + printf("%.2f\n", double(v)); // EXPECTED: 2.0000 + v = sparseget(s, 0, 1); + printf("%.2f\n", double(v)); // EXPECTED: 1.0000 + v = sparseget(s, 1, 0); + printf("%.2f\n", double(v)); // EXPECTED: 0.0000 + v = sparseget(s, 1, 1); + printf("%.2f\n", double(v)); // EXPECTED: 5.0000 -
    - -
    - -rmatrixschur
    - - -
    - -
    -
    /************************************************************************* -Subroutine performing the Schur decomposition of a general matrix by using -the QR algorithm with multiple shifts. + // + // After successful creation we can use our matrix for linear operations. + // + // However, there is one more thing we MUST do before using S in linear + // operations: we have to convert it from HashTable representation (used for + // initialization and dynamic operations) to CRS format with sparseconverttocrs() + // call. If you omit this call, ALGLIB will generate exception on the first + // attempt to use S in linear operations. + // + sparseconverttocrs(s); -COMMERCIAL EDITION OF ALGLIB: + // + // Now S is in the CRS format and we are ready to do linear operations. + // Lets calculate A*x for some x. + // + real_1d_array x = "[1,-1]"; + real_1d_array y = "[]"; + sparsemv(s, x, y); + printf("%s\n", y.tostring(2).c_str()); // EXPECTED: [1.000,-5.000] + return 0; +} - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. - ! - ! Multithreaded acceleration is NOT supported for this function. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. -The source matrix A is represented as S'*A*S = T, where S is an orthogonal -matrix (Schur vectors), T - upper quasi-triangular matrix (with blocks of -sizes 1x1 and 2x2 on the main diagonal). +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "linalg.h"
     
    -Input parameters:
    -    A   -   matrix to be decomposed.
    -            Array whose indexes range within [0..N-1, 0..N-1].
    -    N   -   size of A, N>=0.
    +using namespace alglib;
     
     
    -Output parameters:
    -    A   -   contains matrix T.
    -            Array whose indexes range within [0..N-1, 0..N-1].
    -    S   -   contains Schur vectors.
    -            Array whose indexes range within [0..N-1, 0..N-1].
    +int main(int argc, char **argv)
    +{
    +    //
    +    // This example demonstrates creation/initialization of the sparse matrix in the
    +    // CRS format.
    +    //
+    // The Hash-Table format used by default is very convenient (it allows easy
+    // insertion of elements and automatic memory reallocation), but it has
+    // significant memory and performance overhead. Insertion of one element
+    // costs hundreds of CPU cycles, and memory consumption is several times
+    // higher than that of CRS.
    +    //
+    // When you work with really large matrices, and when you can tell in
+    // advance how many elements EXACTLY you need, it can be beneficial to
+    // create the matrix in the CRS format from the very beginning.
    +    //
    +    // If you want to create matrix in the CRS format, you should:
    +    // * use sparsecreatecrs() function
+    // * know row sizes in advance (number of non-zero entries in each row)
+    // * initialize the matrix with sparseset() - another function, sparseadd(), is not allowed
+    // * initialize elements from left to right, from top to bottom; each
+    //   element is initialized only once.
    +    //
    +    sparsematrix s;
    +    integer_1d_array row_sizes = "[2,2,2,1]";
    +    sparsecreatecrs(4, 4, row_sizes, s);
    +    sparseset(s, 0, 0, 2.0);
    +    sparseset(s, 0, 1, 1.0);
    +    sparseset(s, 1, 1, 4.0);
    +    sparseset(s, 1, 2, 2.0);
    +    sparseset(s, 2, 2, 3.0);
    +    sparseset(s, 2, 3, 1.0);
    +    sparseset(s, 3, 3, 9.0);
     
    -Note 1:
    -    The block structure of matrix T can be easily recognized: since all
    -    the elements below the blocks are zeros, the elements a[i+1,i] which
    -    are equal to 0 show the block border.
    +    //
    +    // Now S is equal to
    +    //   [ 2 1     ]
    +    //   [   4 2   ]
    +    //   [     3 1 ]
    +    //   [       9 ]
    +    //
+    // We should point out that we have initialized S elements from left to right,
+    // from top to bottom. CRS representation does NOT allow you to do so in a
+    // different order. Try to change the order of the sparseset() calls above,
+    // and you will see that your program generates an exception.
    +    //
    +    // We can check it by reading matrix contents with sparseget().
    +    // However, you should remember that sparseget() is inefficient on
    +    // CRS matrices (it may have to pass through all elements of the row 
+    // until it finds the element you need).
    +    //
    +    double v;
    +    v = sparseget(s, 0, 0);
    +    printf("%.2f\n", double(v)); // EXPECTED: 2.0000
    +    v = sparseget(s, 2, 3);
    +    printf("%.2f\n", double(v)); // EXPECTED: 1.0000
     
    -Note 2:
    -    The algorithm performance depends on the value of the internal parameter
    -    NS of the InternalSchurDecomposition subroutine which defines the number
    -    of shifts in the QR algorithm (similarly to the block width in block-matrix
    -    algorithms in linear algebra). If you require maximum performance on
    -    your machine, it is recommended to adjust this parameter manually.
+    // You may see that you can read zero elements (which are not stored) with sparseget().
    +    v = sparseget(s, 3, 2);
    +    printf("%.2f\n", double(v)); // EXPECTED: 0.0000
     
    -Result:
    -    True,
    -        if the algorithm has converged and parameters A and S contain the result.
    -    False,
    -        if the algorithm has not converged.
    +    //
    +    // After successful creation we can use our matrix for linear operations.
+    // Let's calculate A*x for some x.
    +    //
    +    real_1d_array x = "[1,-1,1,-1]";
    +    real_1d_array y = "[]";
    +    sparsemv(s, x, y);
+    printf("%s\n", y.tostring(2).c_str()); // EXPECTED: [1.000,-2.000,2.000,-9.000]
    +    return 0;
    +}
     
    -Algorithm implemented on the basis of the DHSEQR subroutine (LAPACK 3.0 library).
    -*************************************************************************/
    -
    bool alglib::rmatrixschur(real_2d_array& a, ae_int_t n, real_2d_array& s); -
    - + - -
    -
    /************************************************************************* -Temporary buffers for sparse matrix operations. - -You should pass an instance of this structure to factorization functions. -It allows to reuse memory during repeated sparse factorizations. You do -not have to call some initialization function - simply passing an instance -to factorization function is enough. -*************************************************************************/ -
    class sparsebuffers -{ -}; - -
    - +
     
    /************************************************************************* -Sparse matrix structure. - -You should use ALGLIB functions to work with sparse matrix. Never try to -access its fields directly! +Algorithm for solving the following generalized symmetric positive-definite +eigenproblem: + A*x = lambda*B*x (1) or + A*B*x = lambda*x (2) or + B*A*x = lambda*x (3). +where A is a symmetric matrix, B - symmetric positive-definite matrix. +The problem is solved by reducing it to an ordinary symmetric eigenvalue +problem. -NOTES ON THE SPARSE STORAGE FORMATS +Input parameters: + A - symmetric matrix which is given by its upper or lower + triangular part. + Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrices A and B. + IsUpperA - storage format of matrix A. + B - symmetric positive-definite matrix which is given by + its upper or lower triangular part. + Array whose indexes range within [0..N-1, 0..N-1]. + IsUpperB - storage format of matrix B. + ZNeeded - if ZNeeded is equal to: + * 0, the eigenvectors are not returned; + * 1, the eigenvectors are returned. + ProblemType - if ProblemType is equal to: + * 1, the following problem is solved: A*x = lambda*B*x; + * 2, the following problem is solved: A*B*x = lambda*x; + * 3, the following problem is solved: B*A*x = lambda*x. -Sparse matrices can be stored using several formats: -* Hash-Table representation -* Compressed Row Storage (CRS) -* Skyline matrix storage (SKS) +Output parameters: + D - eigenvalues in ascending order. + Array whose index ranges within [0..N-1]. + Z - if ZNeeded is equal to: + * 0, Z hasn't changed; + * 1, Z contains eigenvectors. + Array whose indexes range within [0..N-1, 0..N-1]. + The eigenvectors are stored in matrix columns. It should + be noted that the eigenvectors in such problems do not + form an orthogonal system. -Each of the formats has benefits and drawbacks: -* Hash-table is good for dynamic operations (insertion of new elements), - but does not support linear algebra operations -* CRS is good for operations like matrix-vector or matrix-matrix products, - but its initialization is less convenient - you have to tell row sizes - at the initialization, and you have to fill matrix only row by row, - from left to right. -* SKS is a special format which is used to store triangular factors from - Cholesky factorization. It does not support dynamic modification, and - support for linear algebra operations is very limited. +Result: + True, if the problem was solved successfully. + False, if the error occurred during the Cholesky decomposition of matrix + B (the matrix isn't positive-definite) or during the work of the iterative + algorithm for solving the symmetric eigenproblem. -Tables below outline information about these two formats: +See also the GeneralizedSymmetricDefiniteEVDReduce subroutine. - OPERATIONS WITH MATRIX HASH CRS SKS - creation + + + - SparseGet + + + - SparseRewriteExisting + + + - SparseSet + - SparseAdd + - SparseGetRow + + - SparseGetCompressedRow + + - sparse-dense linear algebra + + + -- ALGLIB -- + Copyright 1.28.2006 by Bochkanov Sergey *************************************************************************/ -
    class sparsematrix -{ -}; +
    bool alglib::smatrixgevd( + real_2d_array a, + ae_int_t n, + bool isuppera, + real_2d_array b, + bool isupperb, + ae_int_t zneeded, + ae_int_t problemtype, + real_1d_array& d, + real_2d_array& z, + const xparams _params = alglib::xdefault);
    - +
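A minimal sketch of smatrixgevd() for problem type 1 (A*x = lambda*B*x); a diagonal A and an identity B are used as illustrative assumptions so that the eigenvalues are obvious:

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // A = diag(2,3), B = I: generalized eigenvalues are simply 2 and 3
    real_2d_array a = "[[2,0],[0,3]]";
    real_2d_array b = "[[1,0],[0,1]]";
    real_1d_array d;
    real_2d_array z;
    bool ok = smatrixgevd(a, 2, true, b, true, 1, 1, d, z); // ZNeeded=1, ProblemType=1
    printf("%s\n", ok ? "true" : "false"); // true
    printf("%s\n", d.tostring(2).c_str()); // eigenvalues in ascending order: [2,3]
    return 0;
}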
     
    /************************************************************************* -This function adds value to S[i,j] - element of the sparse matrix. Matrix -must be in a Hash-Table mode. - -In case S[i,j] already exists in the table, V i added to its value. In -case S[i,j] is non-existent, it is inserted in the table. Table -automatically grows when necessary. - -INPUT PARAMETERS - S - sparse M*N matrix in Hash-Table representation. - Exception will be thrown for CRS matrix. - I - row index of the element to modify, 0<=I<M - J - column index of the element to modify, 0<=J<N - V - value to add, must be finite number - -OUTPUT PARAMETERS - S - modified matrix - -NOTE 1: when S[i,j] is exactly zero after modification, it is deleted -from the table. - - -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::sparseadd(sparsematrix s, ae_int_t i, ae_int_t j, double v); +Algorithm for reduction of the following generalized symmetric positive- +definite eigenvalue problem: + A*x = lambda*B*x (1) or + A*B*x = lambda*x (2) or + B*A*x = lambda*x (3) +to the symmetric eigenvalues problem C*y = lambda*y (eigenvalues of this and +the given problems are the same, and the eigenvectors of the given problem +could be obtained by multiplying the obtained eigenvectors by the +transformation matrix x = R*y). -
    -

    Examples:   [1]  

    - -
    -
    /************************************************************************* -This function performs in-place conversion to desired sparse storage -format. +Here A is a symmetric matrix, B - symmetric positive-definite matrix. -INPUT PARAMETERS - S0 - sparse matrix in any format. - Fmt - desired storage format of the output, as returned by - SparseGetMatrixType() function: - * 0 for hash-based storage - * 1 for CRS - * 2 for SKS +Input parameters: + A - symmetric matrix which is given by its upper or lower + triangular part. + Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrices A and B. + IsUpperA - storage format of matrix A. + B - symmetric positive-definite matrix which is given by + its upper or lower triangular part. + Array whose indexes range within [0..N-1, 0..N-1]. + IsUpperB - storage format of matrix B. + ProblemType - if ProblemType is equal to: + * 1, the following problem is solved: A*x = lambda*B*x; + * 2, the following problem is solved: A*B*x = lambda*x; + * 3, the following problem is solved: B*A*x = lambda*x. -OUTPUT PARAMETERS - S0 - sparse matrix in requested format. +Output parameters: + A - symmetric matrix which is given by its upper or lower + triangle depending on IsUpperA. Contains matrix C. + Array whose indexes range within [0..N-1, 0..N-1]. + R - upper triangular or low triangular transformation matrix + which is used to obtain the eigenvectors of a given problem + as the product of eigenvectors of C (from the right) and + matrix R (from the left). If the matrix is upper + triangular, the elements below the main diagonal + are equal to 0 (and vice versa). Thus, we can perform + the multiplication without taking into account the + internal structure (which is an easier though less + effective way). + Array whose indexes range within [0..N-1, 0..N-1]. + IsUpperR - type of matrix R (upper or lower triangular). -NOTE: in-place conversion wastes a lot of memory which is used to store - temporaries. If you perform a lot of repeated conversions, we - recommend to use out-of-place buffered conversion functions, like - SparseCopyToBuf(), which can reuse already allocated memory. +Result: + True, if the problem was reduced successfully. + False, if the error occurred during the Cholesky decomposition of + matrix B (the matrix is not positive-definite). - -- ALGLIB PROJECT -- - Copyright 16.01.2014 by Bochkanov Sergey + -- ALGLIB -- + Copyright 1.28.2006 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparseconvertto(sparsematrix s0, ae_int_t fmt); +
    bool alglib::smatrixgevdreduce( + real_2d_array& a, + ae_int_t n, + bool isuppera, + real_2d_array b, + bool isupperb, + ae_int_t problemtype, + real_2d_array& r, + bool& isupperr, + const xparams _params = alglib::xdefault);
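A hedged usage sketch for smatrixgevdreduce(), reducing A*x = lambda*B*x (ProblemType=1) to the standard problem C*y = lambda*y. The 2x2 matrices are illustrative and the "linalg.h" header is an assumption; only the reduction call itself is taken from the signature above.

#include <stdio.h>
#include "linalg.h"   // assumed location of smatrixgevdreduce()

using namespace alglib;

int main()
{
    // 2x2 symmetric A and symmetric positive-definite B (illustrative data)
    real_2d_array a = "[[2.0,1.0],[1.0,3.0]]";
    real_2d_array b = "[[4.0,1.0],[1.0,2.0]]";
    real_2d_array r;
    bool isupperr;

    // ProblemType=1: reduce A*x = lambda*B*x to the standard problem C*y = lambda*y.
    // On success A is overwritten by C and R is the back-transformation, x = R*y.
    bool ok = smatrixgevdreduce(a, 2, true, b, true, 1, r, isupperr);
    printf("reduction %s\n", ok ? "succeeded" : "failed (B is not positive-definite)");
    if( ok )
        printf("C[0][0]=%.3f, R is %s triangular\n", a[0][0], isupperr ? "upper" : "lower");
    return 0;
}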
    - + + +
     
    /************************************************************************* -This function converts matrix to CRS format. - -Some algorithms (linear algebra ones, for example) require matrices in -CRS format. This function allows to perform in-place conversion. - -INPUT PARAMETERS - S - sparse M*N matrix in any format - -OUTPUT PARAMETERS - S - matrix in CRS format - -NOTE: this function has no effect when called with matrix which is - already in CRS mode. - -NOTE: this function allocates temporary memory to store a copy of the - matrix. If you perform a lot of repeated conversions, we recommend - you to use SparseCopyToCRSBuf() function, which can reuse - previously allocated memory. +Spline fitting report: + RMSError RMS error + AvgError average error + AvgRelError average relative error (for non-zero Y[I]) + MaxError maximum error - -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey +Fields below are filled by obsolete functions (Spline1DFitCubic, +Spline1DFitHermite). Modern fitting functions do NOT fill these fields: + TaskRCond reciprocal of task's condition number *************************************************************************/ -
    void alglib::sparseconverttocrs(sparsematrix s); +
    class spline1dfitreport +{ + double taskrcond; + double rmserror; + double avgerror; + double avgrelerror; + double maxerror; +};
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This function performs in-place conversion to Hash table storage. - -INPUT PARAMETERS - S - sparse matrix in CRS format. - -OUTPUT PARAMETERS - S - sparse matrix in Hash table format. - -NOTE: this function has no effect when called with matrix which is - already in Hash table mode. - -NOTE: in-place conversion involves allocation of temporary arrays. If you - perform a lot of repeated in- place conversions, it may lead to - memory fragmentation. Consider using out-of-place SparseCopyToHashBuf() - function in this case. - - -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey +1-dimensional spline interpolant *************************************************************************/ -
    void alglib::sparseconverttohash(sparsematrix s); +
    class spline1dinterpolant +{ +};
    - +
     
    /************************************************************************* -This function performs in-place conversion to SKS format. +This subroutine builds Akima spline interpolant -INPUT PARAMETERS - S - sparse matrix in any format. +INPUT PARAMETERS: + X - spline nodes, array[0..N-1] + Y - function values, array[0..N-1] + N - points count (optional): + * N>=2 + * if given, only first N points are used to build spline + * if not given, automatically detected from X/Y sizes + (len(X) must be equal to len(Y)) -OUTPUT PARAMETERS - S - sparse matrix in SKS format. +OUTPUT PARAMETERS: + C - spline interpolant -NOTE: this function has no effect when called with matrix which is - already in SKS mode. -NOTE: in-place conversion involves allocation of temporary arrays. If you - perform a lot of repeated in- place conversions, it may lead to - memory fragmentation. Consider using out-of-place SparseCopyToSKSBuf() - function in this case. +ORDER OF POINTS + +Subroutine automatically sorts points, so caller may pass unsorted array. -- ALGLIB PROJECT -- - Copyright 15.01.2014 by Bochkanov Sergey + Copyright 24.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparseconverttosks(sparsematrix s); +
    void alglib::spline1dbuildakima( + real_1d_array x, + real_1d_array y, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault); +void alglib::spline1dbuildakima( + real_1d_array x, + real_1d_array y, + ae_int_t n, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault);
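A minimal sketch of the short spline1dbuildakima() overload: illustrative f(x)=x^2 samples are interpolated and the result is evaluated with spline1dcalc(). The header and program layout follow the sample programs further below; the printed value is only approximate.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // f(x)=x^2 sampled at 5 nodes (illustrative data)
    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    spline1dinterpolant c;

    spline1dbuildakima(x, y, c);              // N is taken from the array sizes; nodes may be unsorted
    printf("%.3f\n", spline1dcalc(c, 0.25));  // should be close to f(0.25)=0.0625 for this data
    return 0;
}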
    - +
     
    /************************************************************************* -This function copies S0 to S1. -This function completely deallocates memory owned by S1 before creating a -copy of S0. If you want to reuse memory, use SparseCopyBuf. +This subroutine builds Catmull-Rom spline interpolant. -NOTE: this function does not verify its arguments, it just copies all -fields of the structure. +INPUT PARAMETERS: + X - spline nodes, array[0..N-1]. + Y - function values, array[0..N-1]. - -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::sparsecopy(sparsematrix s0, sparsematrix& s1); +OPTIONAL PARAMETERS: + N - points count: + * N>=2 + * if given, only first N points are used to build spline + * if not given, automatically detected from X/Y sizes + (len(X) must be equal to len(Y)) + BoundType - boundary condition type: + * -1 for periodic boundary condition + * 0 for parabolically terminated spline (default) + Tension - tension parameter: + * tension=0 corresponds to classic Catmull-Rom spline (default) + * 0<tension<1 corresponds to more general form - cardinal spline -
    - -
    -
    /************************************************************************* -This function copies S0 to S1. -Memory already allocated in S1 is reused as much as possible. +OUTPUT PARAMETERS: + C - spline interpolant -NOTE: this function does not verify its arguments, it just copies all -fields of the structure. - -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::sparsecopybuf(sparsematrix s0, sparsematrix s1); +ORDER OF POINTS -
    - -
    -
    /************************************************************************* -This function performs out-of-place conversion to desired sparse storage -format. S0 is copied to S1 and converted on-the-fly. Memory allocated in -S1 is reused to maximum extent possible. +Subroutine automatically sorts points, so caller may pass unsorted array. -INPUT PARAMETERS - S0 - sparse matrix in any format. - Fmt - desired storage format of the output, as returned by - SparseGetMatrixType() function: - * 0 for hash-based storage - * 1 for CRS - * 2 for SKS +PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: -OUTPUT PARAMETERS - S1 - sparse matrix in requested format. +Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. +However, this subroutine doesn't require you to specify equal values for +the first and last points - it automatically forces them to be equal by +copying Y[first_point] (corresponds to the leftmost, minimal X[]) to +Y[last_point]. However it is recommended to pass consistent values of Y[], +i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- - Copyright 16.01.2014 by Bochkanov Sergey + Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsecopytobuf( - sparsematrix s0, - ae_int_t fmt, - sparsematrix s1); +
    void alglib::spline1dbuildcatmullrom( + real_1d_array x, + real_1d_array y, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault); +void alglib::spline1dbuildcatmullrom( + real_1d_array x, + real_1d_array y, + ae_int_t n, + ae_int_t boundtype, + double tension, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault);
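A sketch showing both spline1dbuildcatmullrom() overloads: the default parabolically terminated classic spline, and the extended form with a periodic boundary (BoundType=-1). Data values are illustrative only; tension is left at 0 for the classic spline.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // Periodic-looking illustrative data with Y[first]=Y[last]
    real_1d_array x = "[0.0,1.0,2.0,3.0,4.0]";
    real_1d_array y = "[0.0,1.0,0.0,-1.0,0.0]";
    spline1dinterpolant c1, c2;

    // Default overload: parabolically terminated classic Catmull-Rom spline
    spline1dbuildcatmullrom(x, y, c1);

    // Extended overload: N=5, periodic boundary (-1), tension=0
    // (a value 0<tension<1 would give the more general cardinal spline)
    spline1dbuildcatmullrom(x, y, 5, -1, 0.0, c2);

    printf("%.3f %.3f\n", spline1dcalc(c1, 0.5), spline1dcalc(c2, 0.5));
    return 0;
}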
    - +
     
    /************************************************************************* -This function performs out-of-place conversion to CRS format. S0 is -copied to S1 and converted on-the-fly. +This subroutine builds cubic spline interpolant. -INPUT PARAMETERS - S0 - sparse matrix in any format. +INPUT PARAMETERS: + X - spline nodes, array[0..N-1]. + Y - function values, array[0..N-1]. -OUTPUT PARAMETERS - S1 - sparse matrix in CRS format. +OPTIONAL PARAMETERS: + N - points count: + * N>=2 + * if given, only first N points are used to build spline + * if not given, automatically detected from X/Y sizes + (len(X) must be equal to len(Y)) + BoundLType - boundary condition type for the left boundary + BoundL - left boundary condition (first or second derivative, + depending on the BoundLType) + BoundRType - boundary condition type for the right boundary + BoundR - right boundary condition (first or second derivative, + depending on the BoundRType) -NOTE: if S0 is stored as CRS, it is just copied without conversion. +OUTPUT PARAMETERS: + C - spline interpolant -NOTE: this function de-allocates memory occupied by S1 before starting CRS - conversion. If you perform a lot of repeated CRS conversions, it may - lead to memory fragmentation. In this case we recommend you to use - SparseCopyToCRSBuf() function which re-uses memory in S1 as much as - possible. +ORDER OF POINTS - -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::sparsecopytocrs(sparsematrix s0, sparsematrix& s1); +Subroutine automatically sorts points, so caller may pass unsorted array. -
    - -
    -
    /************************************************************************* -This function performs out-of-place conversion to CRS format. S0 is -copied to S1 and converted on-the-fly. Memory allocated in S1 is reused to -maximum extent possible. +SETTING BOUNDARY VALUES: -INPUT PARAMETERS - S0 - sparse matrix in any format. - S1 - matrix which may contain some pre-allocated memory, or - can be just uninitialized structure. +The BoundLType/BoundRType parameters can have the following values: + * -1, which corresonds to the periodic (cyclic) boundary conditions. + In this case: + * both BoundLType and BoundRType must be equal to -1. + * BoundL/BoundR are ignored + * Y[last] is ignored (it is assumed to be equal to Y[first]). + * 0, which corresponds to the parabolically terminated spline + (BoundL and/or BoundR are ignored). + * 1, which corresponds to the first derivative boundary condition + * 2, which corresponds to the second derivative boundary condition + * by default, BoundType=0 is used -OUTPUT PARAMETERS - S1 - sparse matrix in CRS format. +PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: -NOTE: if S0 is stored as CRS, it is just copied without conversion. +Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. +However, this subroutine doesn't require you to specify equal values for +the first and last points - it automatically forces them to be equal by +copying Y[first_point] (corresponds to the leftmost, minimal X[]) to +Y[last_point]. However it is recommended to pass consistent values of Y[], +i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey + Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsecopytocrsbuf(sparsematrix s0, sparsematrix s1); +
    void alglib::spline1dbuildcubic( + real_1d_array x, + real_1d_array y, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault); +void alglib::spline1dbuildcubic( + real_1d_array x, + real_1d_array y, + ae_int_t n, + ae_int_t boundltype, + double boundl, + ae_int_t boundrtype, + double boundr, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault);
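The cubic-spline sample program near the end of this section covers the default and natural boundary conditions; the sketch below shows the periodic variant (BoundLType=BoundRType=-1) with illustrative data where Y[first]=Y[last], as the description recommends.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // One period of a wave-like shape; Y[first]=Y[last] as recommended for periodic splines
    real_1d_array x = "[0.00,0.25,0.50,0.75,1.00]";
    real_1d_array y = "[0.0,1.0,0.0,-1.0,0.0]";
    spline1dinterpolant c;

    // BoundLType=BoundRType=-1 selects periodic boundary conditions; BoundL/BoundR are ignored
    spline1dbuildcubic(x, y, 5, -1, 0.0, -1, 0.0, c);
    printf("%.3f\n", spline1dcalc(c, 0.125));
    return 0;
}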
    - +
     
    /************************************************************************* -This function performs out-of-place conversion to Hash table storage -format. S0 is copied to S1 and converted on-the-fly. +This subroutine builds Hermite spline interpolant. -INPUT PARAMETERS - S0 - sparse matrix in any format. +INPUT PARAMETERS: + X - spline nodes, array[0..N-1] + Y - function values, array[0..N-1] + D - derivatives, array[0..N-1] + N - points count (optional): + * N>=2 + * if given, only first N points are used to build spline + * if not given, automatically detected from X/Y sizes + (len(X) must be equal to len(Y)) -OUTPUT PARAMETERS - S1 - sparse matrix in Hash table format. +OUTPUT PARAMETERS: + C - spline interpolant. -NOTE: if S0 is stored as Hash-table, it is just copied without conversion. -NOTE: this function de-allocates memory occupied by S1 before starting - conversion. If you perform a lot of repeated conversions, it may - lead to memory fragmentation. In this case we recommend you to use - SparseCopyToHashBuf() function which re-uses memory in S1 as much as - possible. +ORDER OF POINTS + +Subroutine automatically sorts points, so caller may pass unsorted array. -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey + Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsecopytohash(sparsematrix s0, sparsematrix& s1); +
    void alglib::spline1dbuildhermite( + real_1d_array x, + real_1d_array y, + real_1d_array d, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault); +void alglib::spline1dbuildhermite( + real_1d_array x, + real_1d_array y, + real_1d_array d, + ae_int_t n, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault);
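A minimal spline1dbuildhermite() sketch: values and exact derivatives of f(x)=x^2 are supplied at the nodes, so the interpolant should reproduce the quadratic closely. Data values are illustrative.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // f(x)=x^2 with exact derivatives f'(x)=2x at the nodes (illustrative data)
    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    real_1d_array d = "[-2.0,-1.0,0.0,+1.0,+2.0]";
    spline1dinterpolant c;

    spline1dbuildhermite(x, y, d, c);
    printf("%.4f\n", spline1dcalc(c, 0.25));   // should be close to f(0.25)=0.0625
    return 0;
}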
    - +
     
    /************************************************************************* -This function performs out-of-place conversion to Hash table storage -format. S0 is copied to S1 and converted on-the-fly. Memory allocated in -S1 is reused to maximum extent possible. +This subroutine builds linear spline interpolant -INPUT PARAMETERS - S0 - sparse matrix in any format. +INPUT PARAMETERS: + X - spline nodes, array[0..N-1] + Y - function values, array[0..N-1] + N - points count (optional): + * N>=2 + * if given, only first N points are used to build spline + * if not given, automatically detected from X/Y sizes + (len(X) must be equal to len(Y)) -OUTPUT PARAMETERS - S1 - sparse matrix in Hash table format. +OUTPUT PARAMETERS: + C - spline interpolant -NOTE: if S0 is stored as Hash-table, it is just copied without conversion. + +ORDER OF POINTS + +Subroutine automatically sorts points, so caller may pass unsorted array. -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey + Copyright 24.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsecopytohashbuf(sparsematrix s0, sparsematrix s1); +
    void alglib::spline1dbuildlinear( + real_1d_array x, + real_1d_array y, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault); +void alglib::spline1dbuildlinear( + real_1d_array x, + real_1d_array y, + ae_int_t n, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault);
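A minimal spline1dbuildlinear() sketch on illustrative f(x)=x^2 samples, in addition to the linked examples; between the nodes at 0 and 0.5 the piecewise-linear interpolant gives 0.125 at x=0.25.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    spline1dinterpolant c;

    spline1dbuildlinear(x, y, c);              // piecewise-linear interpolant
    printf("%.4f\n", spline1dcalc(c, 0.25));   // linear interpolation between (0,0) and (0.5,0.25): 0.125
    return 0;
}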
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -This function performs out-of-place conversion to SKS storage format. -S0 is copied to S1 and converted on-the-fly. - -INPUT PARAMETERS - S0 - sparse matrix in any format. +This function builds monotone cubic Hermite interpolant. This interpolant +is monotonic in [x(0),x(n-1)] and is constant outside of this interval. -OUTPUT PARAMETERS - S1 - sparse matrix in SKS format. +In case y[] form non-monotonic sequence, interpolant is piecewise +monotonic. Say, for x=(0,1,2,3,4) and y=(0,1,2,1,0) interpolant will +monotonically grow at [0..2] and monotonically decrease at [2..4]. -NOTE: if S0 is stored as SKS, it is just copied without conversion. +INPUT PARAMETERS: + X - spline nodes, array[0..N-1]. Subroutine automatically + sorts points, so caller may pass unsorted array. + Y - function values, array[0..N-1] + N - the number of points(N>=2). -NOTE: this function de-allocates memory occupied by S1 before starting - conversion. If you perform a lot of repeated conversions, it may - lead to memory fragmentation. In this case we recommend you to use - SparseCopyToSKSBuf() function which re-uses memory in S1 as much as - possible. +OUTPUT PARAMETERS: + C - spline interpolant. - -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 21.06.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsecopytosks(sparsematrix s0, sparsematrix& s1); +
    void alglib::spline1dbuildmonotone( + real_1d_array x, + real_1d_array y, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault); +void alglib::spline1dbuildmonotone( + real_1d_array x, + real_1d_array y, + ae_int_t n, + spline1dinterpolant& c, + const xparams _params = alglib::xdefault);
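A sketch of spline1dbuildmonotone() using the non-monotonic data from the description above; it also shows the documented behaviour of being constant outside [x(0),x(n-1)].

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // Non-monotonic data from the description: growth on [0,2], decrease on [2,4]
    real_1d_array x = "[0.0,1.0,2.0,3.0,4.0]";
    real_1d_array y = "[0.0,1.0,2.0,1.0,0.0]";
    spline1dinterpolant c;

    spline1dbuildmonotone(x, y, c);
    // The interpolant is constant outside [x(0),x(n-1)], so the last two values equal y[0] and y[4]
    printf("%.3f %.3f %.3f\n",
           spline1dcalc(c, 1.5), spline1dcalc(c, -1.0), spline1dcalc(c, 5.0));
    return 0;
}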
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function performs out-of-place conversion to SKS format. S0 is -copied to S1 and converted on-the-fly. Memory allocated in S1 is reused -to maximum extent possible. - -INPUT PARAMETERS - S0 - sparse matrix in any format. +This subroutine calculates the value of the spline at the given point X. -OUTPUT PARAMETERS - S1 - sparse matrix in SKS format. +INPUT PARAMETERS: + C - spline interpolant + X - point -NOTE: if S0 is stored as SKS, it is just copied without conversion. +Result: + S(x) -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey + Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsecopytosksbuf(sparsematrix s0, sparsematrix s1); +
    double alglib::spline1dcalc( + spline1dinterpolant c, + double x, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  [3]  

    +
     
    /************************************************************************* -This function creates sparse matrix in a Hash-Table format. - -This function creates Hast-Table matrix, which can be converted to CRS -format after its initialization is over. Typical usage scenario for a -sparse matrix is: -1. creation in a Hash-Table format -2. insertion of the matrix elements -3. conversion to the CRS representation -4. matrix is passed to some linear algebra algorithm - -Some information about different matrix formats can be found below, in -the "NOTES" section. +This function solves following problem: given table y[] of function values +at old nodes x[] and new nodes x2[], it calculates and returns table of +function values y2[] (calculated at x2[]). -INPUT PARAMETERS - M - number of rows in a matrix, M>=1 - N - number of columns in a matrix, N>=1 - K - K>=0, expected number of non-zero elements in a matrix. - K can be inexact approximation, can be less than actual - number of elements (table will grow when needed) or - even zero). - It is important to understand that although hash-table - may grow automatically, it is better to provide good - estimate of data size. +This function yields same result as Spline1DBuildCubic() call followed by +sequence of Spline1DDiff() calls, but it can be several times faster when +called for ordered X[] and X2[]. -OUTPUT PARAMETERS - S - sparse M*N matrix in Hash-Table representation. - All elements of the matrix are zero. +INPUT PARAMETERS: + X - old spline nodes + Y - function values + X2 - new spline nodes -NOTE 1 +OPTIONAL PARAMETERS: + N - points count: + * N>=2 + * if given, only first N points from X/Y are used + * if not given, automatically detected from X/Y sizes + (len(X) must be equal to len(Y)) + BoundLType - boundary condition type for the left boundary + BoundL - left boundary condition (first or second derivative, + depending on the BoundLType) + BoundRType - boundary condition type for the right boundary + BoundR - right boundary condition (first or second derivative, + depending on the BoundRType) + N2 - new points count: + * N2>=2 + * if given, only first N2 points from X2 are used + * if not given, automatically detected from X2 size -Hash-tables use memory inefficiently, and they have to keep some amount -of the "spare memory" in order to have good performance. Hash table for -matrix with K non-zero elements will need C*K*(8+2*sizeof(int)) bytes, -where C is a small constant, about 1.5-2 in magnitude. +OUTPUT PARAMETERS: + F2 - function values at X2[] -CRS storage, from the other side, is more memory-efficient, and needs -just K*(8+sizeof(int))+M*sizeof(int) bytes, where M is a number of rows -in a matrix. +ORDER OF POINTS -When you convert from the Hash-Table to CRS representation, all unneeded -memory will be freed. +Subroutine automatically sorts points, so caller may pass unsorted array. +Function values are correctly reordered on return, so F2[I] is always +equal to S(X2[I]) independently of points order. -NOTE 2 +SETTING BOUNDARY VALUES: -Comments of SparseMatrix structure outline information about different -sparse storage formats. We recommend you to read them before starting to -use ALGLIB sparse matrices. +The BoundLType/BoundRType parameters can have the following values: + * -1, which corresonds to the periodic (cyclic) boundary conditions. + In this case: + * both BoundLType and BoundRType must be equal to -1. + * BoundL/BoundR are ignored + * Y[last] is ignored (it is assumed to be equal to Y[first]). 
+ * 0, which corresponds to the parabolically terminated spline + (BoundL and/or BoundR are ignored). + * 1, which corresponds to the first derivative boundary condition + * 2, which corresponds to the second derivative boundary condition + * by default, BoundType=0 is used -NOTE 3 +PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: -This function completely overwrites S with new sparse matrix. Previously -allocated storage is NOT reused. If you want to reuse already allocated -memory, call SparseCreateBuf function. +Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. +However, this subroutine doesn't require you to specify equal values for +the first and last points - it automatically forces them to be equal by +copying Y[first_point] (corresponds to the leftmost, minimal X[]) to +Y[last_point]. However it is recommended to pass consistent values of Y[], +i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey + Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsecreate(ae_int_t m, ae_int_t n, sparsematrix& s); -void alglib::sparsecreate( - ae_int_t m, +
    void alglib::spline1dconvcubic( + real_1d_array x, + real_1d_array y, + real_1d_array x2, + real_1d_array& y2, + const xparams _params = alglib::xdefault); +void alglib::spline1dconvcubic( + real_1d_array x, + real_1d_array y, ae_int_t n, - ae_int_t k, - sparsematrix& s); + ae_int_t boundltype, + double boundl, + ae_int_t boundrtype, + double boundr, + real_1d_array x2, + ae_int_t n2, + real_1d_array& y2, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This version of SparseCreate function creates sparse matrix in Hash-Table -format, reusing previously allocated storage as much as possible. Read -comments for SparseCreate() for more information. +This function solves following problem: given table y[] of function values +at old nodes x[] and new nodes x2[], it calculates and returns table of +function values y2[], first and second derivatives d2[] and dd2[] +(calculated at x2[]). -INPUT PARAMETERS - M - number of rows in a matrix, M>=1 - N - number of columns in a matrix, N>=1 - K - K>=0, expected number of non-zero elements in a matrix. - K can be inexact approximation, can be less than actual - number of elements (table will grow when needed) or - even zero). - It is important to understand that although hash-table - may grow automatically, it is better to provide good - estimate of data size. - S - SparseMatrix structure which MAY contain some already - allocated storage. +This function yields same result as Spline1DBuildCubic() call followed by +sequence of Spline1DDiff() calls, but it can be several times faster when +called for ordered X[] and X2[]. -OUTPUT PARAMETERS - S - sparse M*N matrix in Hash-Table representation. - All elements of the matrix are zero. - Previously allocated storage is reused, if its size - is compatible with expected number of non-zeros K. +INPUT PARAMETERS: + X - old spline nodes + Y - function values + X2 - new spline nodes - -- ALGLIB PROJECT -- - Copyright 14.01.2014 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::sparsecreatebuf(ae_int_t m, ae_int_t n, sparsematrix s); -void alglib::sparsecreatebuf( - ae_int_t m, - ae_int_t n, - ae_int_t k, - sparsematrix s); +OPTIONAL PARAMETERS: + N - points count: + * N>=2 + * if given, only first N points from X/Y are used + * if not given, automatically detected from X/Y sizes + (len(X) must be equal to len(Y)) + BoundLType - boundary condition type for the left boundary + BoundL - left boundary condition (first or second derivative, + depending on the BoundLType) + BoundRType - boundary condition type for the right boundary + BoundR - right boundary condition (first or second derivative, + depending on the BoundRType) + N2 - new points count: + * N2>=2 + * if given, only first N2 points from X2 are used + * if not given, automatically detected from X2 size -
    - -
    -
    /************************************************************************* -This function creates sparse matrix in a CRS format (expert function for -situations when you are running out of memory). +OUTPUT PARAMETERS: + F2 - function values at X2[] + D2 - first derivatives at X2[] + DD2 - second derivatives at X2[] -This function creates CRS matrix. Typical usage scenario for a CRS matrix -is: -1. creation (you have to tell number of non-zero elements at each row at - this moment) -2. insertion of the matrix elements (row by row, from left to right) -3. matrix is passed to some linear algebra algorithm +ORDER OF POINTS -This function is a memory-efficient alternative to SparseCreate(), but it -is more complex because it requires you to know in advance how large your -matrix is. Some information about different matrix formats can be found -in comments on SparseMatrix structure. We recommend you to read them -before starting to use ALGLIB sparse matrices.. +Subroutine automatically sorts points, so caller may pass unsorted array. +Function values are correctly reordered on return, so F2[I] is always +equal to S(X2[I]) independently of points order. -INPUT PARAMETERS - M - number of rows in a matrix, M>=1 - N - number of columns in a matrix, N>=1 - NER - number of elements at each row, array[M], NER[I]>=0 +SETTING BOUNDARY VALUES: -OUTPUT PARAMETERS - S - sparse M*N matrix in CRS representation. - You have to fill ALL non-zero elements by calling - SparseSet() BEFORE you try to use this matrix. +The BoundLType/BoundRType parameters can have the following values: + * -1, which corresonds to the periodic (cyclic) boundary conditions. + In this case: + * both BoundLType and BoundRType must be equal to -1. + * BoundL/BoundR are ignored + * Y[last] is ignored (it is assumed to be equal to Y[first]). + * 0, which corresponds to the parabolically terminated spline + (BoundL and/or BoundR are ignored). + * 1, which corresponds to the first derivative boundary condition + * 2, which corresponds to the second derivative boundary condition + * by default, BoundType=0 is used -NOTE: this function completely overwrites S with new sparse matrix. - Previously allocated storage is NOT reused. If you want to reuse - already allocated memory, call SparseCreateCRSBuf function. +PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: + +Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. +However, this subroutine doesn't require you to specify equal values for +the first and last points - it automatically forces them to be equal by +copying Y[first_point] (corresponds to the leftmost, minimal X[]) to +Y[last_point]. However it is recommended to pass consistent values of Y[], +i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey + Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsecreatecrs( - ae_int_t m, +
    void alglib::spline1dconvdiff2cubic( + real_1d_array x, + real_1d_array y, + real_1d_array x2, + real_1d_array& y2, + real_1d_array& d2, + real_1d_array& dd2, + const xparams _params = alglib::xdefault); +void alglib::spline1dconvdiff2cubic( + real_1d_array x, + real_1d_array y, ae_int_t n, - integer_1d_array ner, - sparsematrix& s); + ae_int_t boundltype, + double boundl, + ae_int_t boundrtype, + double boundr, + real_1d_array x2, + ae_int_t n2, + real_1d_array& y2, + real_1d_array& d2, + real_1d_array& dd2, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function creates sparse matrix in a CRS format (expert function for -situations when you are running out of memory). This version of CRS -matrix creation function may reuse memory already allocated in S. +This function solves following problem: given table y[] of function values +at old nodes x[] and new nodes x2[], it calculates and returns table of +function values y2[] and derivatives d2[] (calculated at x2[]). -This function creates CRS matrix. Typical usage scenario for a CRS matrix -is: -1. creation (you have to tell number of non-zero elements at each row at - this moment) -2. insertion of the matrix elements (row by row, from left to right) -3. matrix is passed to some linear algebra algorithm +This function yields same result as Spline1DBuildCubic() call followed by +sequence of Spline1DDiff() calls, but it can be several times faster when +called for ordered X[] and X2[]. -This function is a memory-efficient alternative to SparseCreate(), but it -is more complex because it requires you to know in advance how large your -matrix is. Some information about different matrix formats can be found -in comments on SparseMatrix structure. We recommend you to read them -before starting to use ALGLIB sparse matrices.. +INPUT PARAMETERS: + X - old spline nodes + Y - function values + X2 - new spline nodes -INPUT PARAMETERS - M - number of rows in a matrix, M>=1 - N - number of columns in a matrix, N>=1 - NER - number of elements at each row, array[M], NER[I]>=0 - S - sparse matrix structure with possibly preallocated - memory. +OPTIONAL PARAMETERS: + N - points count: + * N>=2 + * if given, only first N points from X/Y are used + * if not given, automatically detected from X/Y sizes + (len(X) must be equal to len(Y)) + BoundLType - boundary condition type for the left boundary + BoundL - left boundary condition (first or second derivative, + depending on the BoundLType) + BoundRType - boundary condition type for the right boundary + BoundR - right boundary condition (first or second derivative, + depending on the BoundRType) + N2 - new points count: + * N2>=2 + * if given, only first N2 points from X2 are used + * if not given, automatically detected from X2 size -OUTPUT PARAMETERS - S - sparse M*N matrix in CRS representation. - You have to fill ALL non-zero elements by calling - SparseSet() BEFORE you try to use this matrix. +OUTPUT PARAMETERS: + F2 - function values at X2[] + D2 - first derivatives at X2[] - -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::sparsecreatecrsbuf( - ae_int_t m, - ae_int_t n, - integer_1d_array ner, - sparsematrix s); +ORDER OF POINTS -
    - -
    -
    /************************************************************************* -This function creates sparse matrix in a SKS format (skyline storage -format). In most cases you do not need this function - CRS format better -suits most use cases. +Subroutine automatically sorts points, so caller may pass unsorted array. +Function values are correctly reordered on return, so F2[I] is always +equal to S(X2[I]) independently of points order. -INPUT PARAMETERS - M, N - number of rows(M) and columns (N) in a matrix: - * M=N (as for now, ALGLIB supports only square SKS) - * N>=1 - * M>=1 - D - "bottom" bandwidths, array[M], D[I]>=0. - I-th element stores number of non-zeros at I-th row, - below the diagonal (diagonal itself is not included) - U - "top" bandwidths, array[N], U[I]>=0. - I-th element stores number of non-zeros at I-th row, - above the diagonal (diagonal itself is not included) +SETTING BOUNDARY VALUES: -OUTPUT PARAMETERS - S - sparse M*N matrix in SKS representation. - All elements are filled by zeros. - You may use SparseRewriteExisting() to change their - values. +The BoundLType/BoundRType parameters can have the following values: + * -1, which corresonds to the periodic (cyclic) boundary conditions. + In this case: + * both BoundLType and BoundRType must be equal to -1. + * BoundL/BoundR are ignored + * Y[last] is ignored (it is assumed to be equal to Y[first]). + * 0, which corresponds to the parabolically terminated spline + (BoundL and/or BoundR are ignored). + * 1, which corresponds to the first derivative boundary condition + * 2, which corresponds to the second derivative boundary condition + * by default, BoundType=0 is used -NOTE: this function completely overwrites S with new sparse matrix. - Previously allocated storage is NOT reused. If you want to reuse - already allocated memory, call SparseCreateSKSBuf function. +PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: + +Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. +However, this subroutine doesn't require you to specify equal values for +the first and last points - it automatically forces them to be equal by +copying Y[first_point] (corresponds to the leftmost, minimal X[]) to +Y[last_point]. However it is recommended to pass consistent values of Y[], +i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- - Copyright 13.01.2014 by Bochkanov Sergey + Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsecreatesks( - ae_int_t m, +
    void alglib::spline1dconvdiffcubic( + real_1d_array x, + real_1d_array y, + real_1d_array x2, + real_1d_array& y2, + real_1d_array& d2, + const xparams _params = alglib::xdefault); +void alglib::spline1dconvdiffcubic( + real_1d_array x, + real_1d_array y, ae_int_t n, - integer_1d_array d, - integer_1d_array u, - sparsematrix& s); + ae_int_t boundltype, + double boundl, + ae_int_t boundrtype, + double boundr, + real_1d_array x2, + ae_int_t n2, + real_1d_array& y2, + real_1d_array& d2, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This is "buffered" version of SparseCreateSKS() which reuses memory -previously allocated in S (of course, memory is reallocated if needed). - -This function creates sparse matrix in a SKS format (skyline storage -format). In most cases you do not need this function - CRS format better -suits most use cases. +This subroutine differentiates the spline. -INPUT PARAMETERS - M, N - number of rows(M) and columns (N) in a matrix: - * M=N (as for now, ALGLIB supports only square SKS) - * N>=1 - * M>=1 - D - "bottom" bandwidths, array[M], 0<=D[I]<=I. - I-th element stores number of non-zeros at I-th row, - below the diagonal (diagonal itself is not included) - U - "top" bandwidths, array[N], 0<=U[I]<=I. - I-th element stores number of non-zeros at I-th row, - above the diagonal (diagonal itself is not included) +INPUT PARAMETERS: + C - spline interpolant. + X - point -OUTPUT PARAMETERS - S - sparse M*N matrix in SKS representation. - All elements are filled by zeros. - You may use SparseSet()/SparseAdd() to change their - values. +Result: + S - S(x) + DS - S'(x) + D2S - S''(x) -- ALGLIB PROJECT -- - Copyright 13.01.2014 by Bochkanov Sergey + Copyright 24.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsecreatesksbuf( - ae_int_t m, - ae_int_t n, - integer_1d_array d, - integer_1d_array u, - sparsematrix s); +
    void alglib::spline1ddiff( + spline1dinterpolant c, + double x, + double& s, + double& ds, + double& d2s, + const xparams _params = alglib::xdefault);
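A minimal spline1ddiff() sketch: a default cubic spline is built from illustrative f(x)=x^2 samples and its value and first two derivatives are queried at one point; the commented numbers are approximations.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // Cubic spline through f(x)=x^2 samples; value and derivatives at one point
    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    spline1dinterpolant c;
    double s, ds, d2s;

    spline1dbuildcubic(x, y, c);
    spline1ddiff(c, 0.25, s, ds, d2s);
    // For this data S, S' and S'' should be close to 0.0625, 0.5 and 2.0 respectively
    printf("%.4f %.4f %.4f\n", s, ds, d2s);
    return 0;
}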
    - +
     
    /************************************************************************* -This function is used to enumerate all elements of the sparse matrix. -Before first call user initializes T0 and T1 counters by zero. These -counters are used to remember current position in a matrix; after each -call they are updated by the function. +Fitting by smoothing (penalized) cubic spline. -Subsequent calls to this function return non-zero elements of the sparse -matrix, one by one. If you enumerate CRS matrix, matrix is traversed from -left to right, from top to bottom. In case you enumerate matrix stored as -Hash table, elements are returned in random order. - -EXAMPLE - > T0=0 - > T1=0 - > while SparseEnumerate(S,T0,T1,I,J,V) do - > ....do something with I,J,V +This function approximates N scattered points (some of X[] may be equal to +each other) by cubic spline with M nodes at equidistant grid spanning +interval [min(x,xc),max(x,xc)]. -INPUT PARAMETERS - S - sparse M*N matrix in Hash-Table or CRS representation. - T0 - internal counter - T1 - internal counter +The problem is regularized by adding nonlinearity penalty to usual least +squares penalty function: -OUTPUT PARAMETERS - T0 - new value of the internal counter - T1 - new value of the internal counter - I - row index of non-zero element, 0<=I<M. - J - column index of non-zero element, 0<=J<N - V - value of the T-th element + MERIT_FUNC = F_LS + F_NL -RESULT - True in case of success (next non-zero element was retrieved) - False in case all non-zero elements were enumerated +where F_LS is a least squares error term, and F_NL is a nonlinearity +penalty which is roughly proportional to LambdaNS*integral{ S''(x)^2*dx }. +Algorithm applies automatic renormalization of F_NL which makes penalty +term roughly invariant to scaling of X[] and changes in M. -NOTE: you may call SparseRewriteExisting() during enumeration, but it is - THE ONLY matrix modification function you can call!!! Other - matrix modification functions should not be called during enumeration! +This function is a new edition of penalized regression spline fitting, +a fast and compact one which needs much less resources that its previous +version: just O(maxMN) memory and O(maxMN*log(maxMN)) time. - -- ALGLIB PROJECT -- - Copyright 14.03.2012 by Bochkanov Sergey -*************************************************************************/ -
    bool alglib::sparseenumerate( - sparsematrix s, - ae_int_t& t0, - ae_int_t& t1, - ae_int_t& i, - ae_int_t& j, - double& v); +NOTE: it is OK to run this function with both M<<N and M>>N; say, it is + possible to process 100 points with 1000-node spline. -
    - -
    -
    /************************************************************************* -The function frees all memory occupied by sparse matrix. Sparse matrix -structure becomes unusable after this call. +INPUT PARAMETERS: + X - points, array[0..N-1]. + Y - function values, array[0..N-1]. + N - number of points (optional): + * N>0 + * if given, only first N elements of X/Y are processed + * if not given, automatically determined from lengths + M - number of basis functions ( = number_of_nodes), M>=4. + LambdaNS - LambdaNS>=0, regularization constant passed by user. + It penalizes nonlinearity in the regression spline. + Possible values to start from are 0.00001, 0.1, 1 -OUTPUT PARAMETERS - S - sparse matrix to delete +OUTPUT PARAMETERS: + S - spline interpolant. + Rep - Following fields are set: + * RMSError rms error on the (X,Y). + * AvgError average error on the (X,Y). + * AvgRelError average relative error on the non-zero Y + * MaxError maximum error -- ALGLIB PROJECT -- - Copyright 24.07.2012 by Bochkanov Sergey + Copyright 27.08.2019 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsefree(sparsematrix& s); +
    void alglib::spline1dfit( + real_1d_array x, + real_1d_array y, + ae_int_t m, + double lambdans, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault); +void alglib::spline1dfit( + real_1d_array x, + real_1d_array y, + ae_int_t n, + ae_int_t m, + double lambdans, + spline1dinterpolant& s, + spline1dfitreport& rep, + const xparams _params = alglib::xdefault);
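A hedged sketch of the penalized fit: the data points, node count M and LambdaNS value are illustrative (0.1 is one of the suggested starting values), and the report fields printed are the ones listed in spline1dfitreport above.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // Slightly noisy samples of a roughly linear function (illustrative data), M=20 nodes
    real_1d_array x = "[0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]";
    real_1d_array y = "[0.00,0.12,0.19,0.31,0.38,0.52,0.61,0.68,0.81,0.91,1.01]";
    spline1dinterpolant s;
    spline1dfitreport rep;

    // LambdaNS=0.1 penalizes nonlinearity; larger values give smoother fits
    spline1dfit(x, y, 20, 0.1, s, rep);
    printf("rms=%.4f max=%.4f fit(0.5)=%.3f\n",
           rep.rmserror, rep.maxerror, spline1dcalc(s, 0.5));
    return 0;
}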
    - +
     
    /************************************************************************* -This function returns S[i,j] - element of the sparse matrix. Matrix can -be in any mode (Hash-Table, CRS, SKS), but this function is less efficient -for CRS matrices. Hash-Table and SKS matrices can find element in O(1) -time, while CRS matrices need O(log(RS)) time, where RS is an number of -non-zero elements in a row. +This function solves following problem: given table y[] of function values +at nodes x[], it calculates and returns tables of first and second +function derivatives d1[] and d2[] (calculated at the same nodes x[]). -INPUT PARAMETERS - S - sparse M*N matrix in Hash-Table representation. - Exception will be thrown for CRS matrix. - I - row index of the element to modify, 0<=I<M - J - column index of the element to modify, 0<=J<N +This function yields same result as Spline1DBuildCubic() call followed by +sequence of Spline1DDiff() calls, but it can be several times faster when +called for ordered X[] and X2[]. -RESULT - value of S[I,J] or zero (in case no element with such index is found) +INPUT PARAMETERS: + X - spline nodes + Y - function values - -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::sparseget(sparsematrix s, ae_int_t i, ae_int_t j); +OPTIONAL PARAMETERS: + N - points count: + * N>=2 + * if given, only first N points are used + * if not given, automatically detected from X/Y sizes + (len(X) must be equal to len(Y)) + BoundLType - boundary condition type for the left boundary + BoundL - left boundary condition (first or second derivative, + depending on the BoundLType) + BoundRType - boundary condition type for the right boundary + BoundR - right boundary condition (first or second derivative, + depending on the BoundRType) -
    -

    Examples:   [1]  [2]  

    - -
    -
    /************************************************************************* -This function returns I-th row of the sparse matrix IN COMPRESSED FORMAT - -only non-zero elements are returned (with their indexes). Matrix must be -stored in CRS or SKS format. +OUTPUT PARAMETERS: + D1 - S' values at X[] + D2 - S'' values at X[] -INPUT PARAMETERS: - S - sparse M*N matrix in CRS format - I - row index, 0<=I<M - ColIdx - output buffer for column indexes, can be preallocated. - In case buffer size is too small to store I-th row, it - is automatically reallocated. - Vals - output buffer for values, can be preallocated. In case - buffer size is too small to store I-th row, it is - automatically reallocated. +ORDER OF POINTS -OUTPUT PARAMETERS: - ColIdx - column indexes of non-zero elements, sorted by - ascending. Symbolically non-zero elements are counted - (i.e. if you allocated place for element, but it has - zero numerical value - it is counted). - Vals - values. Vals[K] stores value of matrix element with - indexes (I,ColIdx[K]). Symbolically non-zero elements - are counted (i.e. if you allocated place for element, - but it has zero numerical value - it is counted). - NZCnt - number of symbolically non-zero elements per row. +Subroutine automatically sorts points, so caller may pass unsorted array. +Derivative values are correctly reordered on return, so D[I] is always +equal to S'(X[I]) independently of points order. + +SETTING BOUNDARY VALUES: + +The BoundLType/BoundRType parameters can have the following values: + * -1, which corresonds to the periodic (cyclic) boundary conditions. + In this case: + * both BoundLType and BoundRType must be equal to -1. + * BoundL/BoundR are ignored + * Y[last] is ignored (it is assumed to be equal to Y[first]). + * 0, which corresponds to the parabolically terminated spline + (BoundL and/or BoundR are ignored). + * 1, which corresponds to the first derivative boundary condition + * 2, which corresponds to the second derivative boundary condition + * by default, BoundType=0 is used -NOTE: when incorrect I (outside of [0,M-1]) or matrix (non CRS/SKS) - is passed, this function throws exception. +PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: -NOTE: this function may allocate additional, unnecessary place for ColIdx - and Vals arrays. It is dictated by performance reasons - on SKS - matrices it is faster to allocate space at the beginning with - some "extra"-space, than performing two passes over matrix - first - time to calculate exact space required for data, second time - to - store data itself. +Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. +However, this subroutine doesn't require you to specify equal values for +the first and last points - it automatically forces them to be equal by +copying Y[first_point] (corresponds to the leftmost, minimal X[]) to +Y[last_point]. However it is recommended to pass consistent values of Y[], +i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- - Copyright 10.12.2014 by Bochkanov Sergey + Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsegetcompressedrow( - sparsematrix s, - ae_int_t i, - integer_1d_array& colidx, - real_1d_array& vals, - ae_int_t& nzcnt); +
    void alglib::spline1dgriddiff2cubic( + real_1d_array x, + real_1d_array y, + real_1d_array& d1, + real_1d_array& d2, + const xparams _params = alglib::xdefault); +void alglib::spline1dgriddiff2cubic( + real_1d_array x, + real_1d_array y, + ae_int_t n, + ae_int_t boundltype, + double boundl, + ae_int_t boundrtype, + double boundr, + real_1d_array& d1, + real_1d_array& d2, + const xparams _params = alglib::xdefault);
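A minimal spline1dgriddiff2cubic() sketch with the short overload and default boundary conditions; for these illustrative f(x)=x^2 samples the returned derivatives should be close to 2*x and 2 respectively.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // First and second derivatives of the interpolating cubic at the nodes themselves
    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    real_1d_array d1, d2;

    spline1dgriddiff2cubic(x, y, d1, d2);
    printf("%s\n", d1.tostring(2).c_str());   // roughly [-2.0,-1.0,0.0,1.0,2.0] for this data
    printf("%s\n", d2.tostring(2).c_str());   // roughly [2.0,2.0,2.0,2.0,2.0]
    return 0;
}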
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function returns I-th diagonal element of the sparse matrix. +This function solves following problem: given table y[] of function values +at nodes x[], it calculates and returns table of function derivatives d[] +(calculated at the same nodes x[]). -Matrix can be in any mode (Hash-Table or CRS storage), but this function -is most efficient for CRS matrices - it requires less than 50 CPU cycles -to extract diagonal element. For Hash-Table matrices we still have O(1) -query time, but function is many times slower. +This function yields same result as Spline1DBuildCubic() call followed by +sequence of Spline1DDiff() calls, but it can be several times faster when +called for ordered X[] and X2[]. -INPUT PARAMETERS - S - sparse M*N matrix in Hash-Table representation. - Exception will be thrown for CRS matrix. - I - index of the element to modify, 0<=I<min(M,N) +INPUT PARAMETERS: + X - spline nodes + Y - function values -RESULT - value of S[I,I] or zero (in case no element with such index is found) +OPTIONAL PARAMETERS: + N - points count: + * N>=2 + * if given, only first N points are used + * if not given, automatically detected from X/Y sizes + (len(X) must be equal to len(Y)) + BoundLType - boundary condition type for the left boundary + BoundL - left boundary condition (first or second derivative, + depending on the BoundLType) + BoundRType - boundary condition type for the right boundary + BoundR - right boundary condition (first or second derivative, + depending on the BoundRType) - -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::sparsegetdiagonal(sparsematrix s, ae_int_t i); +OUTPUT PARAMETERS: + D - derivative values at X[] -
    - -
    -
    /************************************************************************* -The function returns number of strictly lower triangular non-zero elements -in the matrix. It counts SYMBOLICALLY non-zero elements, i.e. entries -in the sparse matrix data structure. If some element has zero numerical -value, it is still counted. +ORDER OF POINTS -This function has different cost for different types of matrices: -* for hash-based matrices it involves complete pass over entire hash-table - with O(NNZ) cost, where NNZ is number of non-zero elements -* for CRS and SKS matrix types cost of counting is O(N) (N - matrix size). +Subroutine automatically sorts points, so caller may pass unsorted array. +Derivative values are correctly reordered on return, so D[I] is always +equal to S'(X[I]) independently of points order. -RESULT: number of non-zero elements strictly below main diagonal +SETTING BOUNDARY VALUES: + +The BoundLType/BoundRType parameters can have the following values: + * -1, which corresonds to the periodic (cyclic) boundary conditions. + In this case: + * both BoundLType and BoundRType must be equal to -1. + * BoundL/BoundR are ignored + * Y[last] is ignored (it is assumed to be equal to Y[first]). + * 0, which corresponds to the parabolically terminated spline + (BoundL and/or BoundR are ignored). + * 1, which corresponds to the first derivative boundary condition + * 2, which corresponds to the second derivative boundary condition + * by default, BoundType=0 is used + +PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: + +Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. +However, this subroutine doesn't require you to specify equal values for +the first and last points - it automatically forces them to be equal by +copying Y[first_point] (corresponds to the leftmost, minimal X[]) to +Y[last_point]. However it is recommended to pass consistent values of Y[], +i.e. to make Y[first_point]=Y[last_point]. -- ALGLIB PROJECT -- - Copyright 12.02.2014 by Bochkanov Sergey + Copyright 03.09.2010 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::sparsegetlowercount(sparsematrix s); +
    void alglib::spline1dgriddiffcubic( + real_1d_array x, + real_1d_array y, + real_1d_array& d, + const xparams _params = alglib::xdefault); +void alglib::spline1dgriddiffcubic( + real_1d_array x, + real_1d_array y, + ae_int_t n, + ae_int_t boundltype, + double boundl, + ae_int_t boundrtype, + double boundr, + real_1d_array& d, + const xparams _params = alglib::xdefault);
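A sketch of spline1dgriddiffcubic() that passes the nodes unsorted, relying on the documented reordering so that D[i] still corresponds to S'(X[i]) in the original order; data values are illustrative.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    // f(x)=x^2 samples given in deliberately unsorted order
    real_1d_array x = "[0.0,+1.0,-1.0,+0.5,-0.5]";
    real_1d_array y = "[0.0,+1.0,+1.0,0.25,0.25]";
    real_1d_array d;

    spline1dgriddiffcubic(x, y, d);
    // D[i] corresponds to S'(X[i]) in the original (unsorted) order
    printf("%s\n", d.tostring(2).c_str());
    return 0;
}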
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function returns type of the matrix storage format. +This subroutine integrates the spline. INPUT PARAMETERS: - S - sparse matrix. - -RESULT: - sparse storage format used by matrix: - 0 - Hash-table - 1 - CRS (compressed row storage) - 2 - SKS (skyline) - -NOTE: future versions of ALGLIB may include additional sparse storage - formats. - + C - spline interpolant. + X - right bound of the integration interval [a, x], + here 'a' denotes min(x[]) +Result: + integral(S(t)dt,a,x) -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey + Copyright 23.06.2007 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::sparsegetmatrixtype(sparsematrix s); +
    double alglib::spline1dintegrate( + spline1dinterpolant c, + double x, + const xparams _params = alglib::xdefault);
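A minimal spline1dintegrate() sketch: the default cubic spline through illustrative f(x)=x^2 samples is integrated from a=min(x[])=-1 up to x=1, so the result should be near 2/3.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    spline1dinterpolant c;

    spline1dbuildcubic(x, y, c);
    double v = spline1dintegrate(c, 1.0);   // integral of S over [-1,1]
    printf("%.4f\n", v);                    // integral of x^2 over [-1,1] is 2/3, so a value near 0.6667
    return 0;
}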
    - +
     
    /************************************************************************* -The function returns number of columns of a sparse matrix. +This subroutine performs linear transformation of the spline argument. -RESULT: number of columns of a sparse matrix. +INPUT PARAMETERS: + C - spline interpolant. + A, B- transformation coefficients: x = A*t + B +Result: + C - transformed spline -- ALGLIB PROJECT -- - Copyright 23.08.2012 by Bochkanov Sergey + Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::sparsegetncols(sparsematrix s); +
    void alglib::spline1dlintransx( + spline1dinterpolant c, + double a, + double b, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -The function returns number of rows of a sparse matrix. +This subroutine performs linear transformation of the spline. -RESULT: number of rows of a sparse matrix. +INPUT PARAMETERS: + C - spline interpolant. + A, B- transformation coefficients: S2(x) = A*S(x) + B +Result: + C - transformed spline -- ALGLIB PROJECT -- - Copyright 23.08.2012 by Bochkanov Sergey + Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::sparsegetnrows(sparsematrix s); +
    void alglib::spline1dlintransy( + spline1dinterpolant c, + double a, + double b, + const xparams _params = alglib::xdefault);
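A combined sketch of spline1dlintransx() and spline1dlintransy() on one interpolant, applying first the argument transform x = 2*t and then the value transform S2(x) = 3*S(x) + 1; data and the commented value are illustrative.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    spline1dinterpolant c;

    spline1dbuildcubic(x, y, c);

    // Argument transform x = A*t + B with A=2, B=0: the new S(t) equals the old S(2*t)
    spline1dlintransx(c, 2.0, 0.0);
    // Value transform S2(x) = A*S(x) + B: scale by 3 and shift by 1
    spline1dlintransy(c, 3.0, 1.0);

    // Old S(0.5) was about 0.25, so the transformed spline at 0.25 should be near 3*0.25+1=1.75
    printf("%.4f\n", spline1dcalc(c, 0.25));
    return 0;
}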
    - +
     
    /************************************************************************* -This function returns I-th row of the sparse matrix. Matrix must be stored -in CRS or SKS format. +This subroutine unpacks the spline into the coefficients table. INPUT PARAMETERS: - S - sparse M*N matrix in CRS format - I - row index, 0<=I<M - IRow - output buffer, can be preallocated. In case buffer - size is too small to store I-th row, it is - automatically reallocated. + C - spline interpolant. + X - point OUTPUT PARAMETERS: - IRow - array[M], I-th row. - -NOTE: this function has O(N) running time, where N is a column count. It - allocates and fills N-element array, even although most of its - elemets are zero. - -NOTE: If you have O(non-zeros-per-row) time and memory requirements, use - SparseGetCompressedRow() function. It returns data in compressed - format. + Tbl - coefficients table, unpacked format, array[0..N-2, 0..5]. + For I = 0...N-2: + Tbl[I,0] = X[i] + Tbl[I,1] = X[i+1] + Tbl[I,2] = C0 + Tbl[I,3] = C1 + Tbl[I,4] = C2 + Tbl[I,5] = C3 + On [x[i], x[i+1]] spline is equals to: + S(x) = C0 + C1*t + C2*t^2 + C3*t^3 + t = x-x[i] -NOTE: when incorrect I (outside of [0,M-1]) or matrix (non CRS/SKS) - is passed, this function throws exception. +NOTE: + You can rebuild spline with Spline1DBuildHermite() function, which + accepts as inputs function values and derivatives at nodes, which are + easy to calculate when you have coefficients. -- ALGLIB PROJECT -- - Copyright 10.12.2014 by Bochkanov Sergey + Copyright 29.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsegetrow( - sparsematrix s, - ae_int_t i, - real_1d_array& irow); +
    void alglib::spline1dunpack( + spline1dinterpolant c, + ae_int_t& n, + real_2d_array& tbl, + const xparams _params = alglib::xdefault);
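A minimal spline1dunpack() sketch that prints the per-interval coefficient rows in the layout described above; data values are illustrative.

#include <stdio.h>
#include "interpolation.h"

using namespace alglib;

int main()
{
    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    spline1dinterpolant c;
    ae_int_t n;
    real_2d_array tbl;

    spline1dbuildcubic(x, y, c);
    spline1dunpack(c, n, tbl);

    // One row per interval: [x[i], x[i+1], C0, C1, C2, C3], with S(x)=C0+C1*t+C2*t^2+C3*t^3, t=x-x[i]
    for(ae_int_t i = 0; i < n - 1; i++)
        printf("[%5.2f,%5.2f]  C0=%7.4f C1=%7.4f C2=%7.4f C3=%7.4f\n",
               tbl[i][0], tbl[i][1], tbl[i][2], tbl[i][3], tbl[i][4], tbl[i][5]);
    return 0;
}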
    - +
    -
    /************************************************************************* -The function returns number of strictly upper triangular non-zero elements -in the matrix. It counts SYMBOLICALLY non-zero elements, i.e. entries -in the sparse matrix data structure. If some element has zero numerical -value, it is still counted. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -This function has different cost for different types of matrices: -* for hash-based matrices it involves complete pass over entire hash-table - with O(NNZ) cost, where NNZ is number of non-zero elements -* for CRS and SKS matrix types cost of counting is O(N) (N - matrix size). +using namespace alglib; -RESULT: number of non-zero elements strictly above main diagonal - -- ALGLIB PROJECT -- - Copyright 12.02.2014 by Bochkanov Sergey -*************************************************************************/ -
    ae_int_t alglib::sparsegetuppercount(sparsematrix s); +int main(int argc, char **argv) +{ + // + // We use cubic spline to do resampling, i.e. having + // values of f(x)=x^2 sampled at 5 equidistant nodes on [-1,+1] + // we calculate values/derivatives of cubic spline on + // another grid (equidistant with 9 nodes on [-1,+1]) + // WITHOUT CONSTRUCTION OF SPLINE OBJECT. + // + // There are efficient functions spline1dconvcubic(), + // spline1dconvdiffcubic() and spline1dconvdiff2cubic() + // for such calculations. + // + // We use default boundary conditions ("parabolically terminated + // spline") because cubic spline built with such boundary conditions + // will exactly reproduce any quadratic f(x). + // + // Actually, we could use natural conditions, but we feel that + // spline which exactly reproduces f() will show us more + // understandable results. + // + real_1d_array x_old = "[-1.0,-0.5,0.0,+0.5,+1.0]"; + real_1d_array y_old = "[+1.0,0.25,0.0,0.25,+1.0]"; + real_1d_array x_new = "[-1.00,-0.75,-0.50,-0.25,0.00,+0.25,+0.50,+0.75,+1.00]"; + real_1d_array y_new; + real_1d_array d1_new; + real_1d_array d2_new; -
    - + // + // First, conversion without differentiation. + // + // + spline1dconvcubic(x_old, y_old, x_new, y_new); + printf("%s\n", y_new.tostring(3).c_str()); // EXPECTED: [1.0000, 0.5625, 0.2500, 0.0625, 0.0000, 0.0625, 0.2500, 0.5625, 1.0000] + + // + // Then, conversion with differentiation (first derivatives only) + // + // + spline1dconvdiffcubic(x_old, y_old, x_new, y_new, d1_new); + printf("%s\n", y_new.tostring(3).c_str()); // EXPECTED: [1.0000, 0.5625, 0.2500, 0.0625, 0.0000, 0.0625, 0.2500, 0.5625, 1.0000] + printf("%s\n", d1_new.tostring(3).c_str()); // EXPECTED: [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0] + + // + // Finally, conversion with first and second derivatives + // + // + spline1dconvdiff2cubic(x_old, y_old, x_new, y_new, d1_new, d2_new); + printf("%s\n", y_new.tostring(3).c_str()); // EXPECTED: [1.0000, 0.5625, 0.2500, 0.0625, 0.0000, 0.0625, 0.2500, 0.5625, 1.0000] + printf("%s\n", d1_new.tostring(3).c_str()); // EXPECTED: [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0] + printf("%s\n", d2_new.tostring(3).c_str()); // EXPECTED: [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0] + return 0; +} + + +
    -
    /************************************************************************* -This function checks matrix storage format and returns True when matrix is -stored using CRS representation. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -INPUT PARAMETERS: - S - sparse matrix. +using namespace alglib; -RESULT: - True if matrix type is CRS - False if matrix type is not CRS - -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey -*************************************************************************/ -
bool alglib::sparseiscrs(sparsematrix s);
+int main(int argc, char **argv)
+{
+ //
+ // We use cubic spline to interpolate f(x)=x^2 sampled
+ // at 5 equidistant nodes on [-1,+1].
+ //
+ // First, we use default boundary conditions ("parabolically terminated
+ // spline") because cubic spline built with such boundary conditions
+ // will exactly reproduce any quadratic f(x).
+ //
+ // Then we try to use natural boundary conditions
+ // d2S(-1)/dx^2 = 0.0
+ // d2S(+1)/dx^2 = 0.0
+ // and see that such a spline interpolates f(x) with a small error.
+ //
+ real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
+ real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
+ double t = 0.25;
+ double v;
+ spline1dinterpolant s;
+ ae_int_t natural_bound_type = 2;
+ //
+ // Test exact boundary conditions: build S(x), calculate S(0.25)
+ // (almost same as original function)
+ //
+ spline1dbuildcubic(x, y, s);
+ v = spline1dcalc(s, t);
+ printf("%.4f\n", double(v)); // EXPECTED: 0.0625
-
- +
+ //
+ // Test natural boundary conditions: build S(x), calculate S(0.25)
+ // (small interpolation error)
+ //
+ spline1dbuildcubic(x, y, 5, natural_bound_type, 0.0, natural_bound_type, 0.0, s);
+ v = spline1dcalc(s, t);
+ printf("%.3f\n", double(v)); // EXPECTED: 0.0580
+ return 0;
+}
+
+
+
    -
    /************************************************************************* -This function checks matrix storage format and returns True when matrix is -stored using Hash table representation. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -INPUT PARAMETERS: - S - sparse matrix. +using namespace alglib; -RESULT: - True if matrix type is Hash table - False if matrix type is not Hash table - -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey -*************************************************************************/ -
    bool alglib::sparseishash(sparsematrix s); +int main(int argc, char **argv) +{ + // + // We use cubic spline to do grid differentiation, i.e. having + // values of f(x)=x^2 sampled at 5 equidistant nodes on [-1,+1] + // we calculate derivatives of cubic spline at nodes WITHOUT + // CONSTRUCTION OF SPLINE OBJECT. + // + // There are efficient functions spline1dgriddiffcubic() and + // spline1dgriddiff2cubic() for such calculations. + // + // We use default boundary conditions ("parabolically terminated + // spline") because cubic spline built with such boundary conditions + // will exactly reproduce any quadratic f(x). + // + // Actually, we could use natural conditions, but we feel that + // spline which exactly reproduces f() will show us more + // understandable results. + // + real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]"; + real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]"; + real_1d_array d1; + real_1d_array d2; -
    - + // + // We calculate first derivatives: they must be equal to 2*x + // + spline1dgriddiffcubic(x, y, d1); + printf("%s\n", d1.tostring(3).c_str()); // EXPECTED: [-2.0, -1.0, 0.0, +1.0, +2.0] + + // + // Now test griddiff2, which returns first AND second derivatives. + // First derivative is 2*x, second is equal to 2.0 + // + spline1dgriddiff2cubic(x, y, d1, d2); + printf("%s\n", d1.tostring(3).c_str()); // EXPECTED: [-2.0, -1.0, 0.0, +1.0, +2.0] + printf("%s\n", d2.tostring(3).c_str()); // EXPECTED: [ 2.0, 2.0, 2.0, 2.0, 2.0] + return 0; +} + + +
    -
    /************************************************************************* -This function checks matrix storage format and returns True when matrix is -stored using SKS representation. +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -INPUT PARAMETERS: - S - sparse matrix. +using namespace alglib; -RESULT: - True if matrix type is SKS - False if matrix type is not SKS - -- ALGLIB PROJECT -- - Copyright 20.07.2012 by Bochkanov Sergey -*************************************************************************/ -
    bool alglib::sparseissks(sparsematrix s); +int main(int argc, char **argv) +{ + // + // We use piecewise linear spline to interpolate f(x)=x^2 sampled + // at 5 equidistant nodes on [-1,+1]. + // + real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]"; + real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]"; + double t = 0.25; + double v; + spline1dinterpolant s; -
    - + // build spline + spline1dbuildlinear(x, y, s); + + // calculate S(0.25) - it is quite different from 0.25^2=0.0625 + v = spline1dcalc(s, t); + printf("%.4f\n", double(v)); // EXPECTED: 0.125 + return 0; +} + + +
    -
/*************************************************************************
-This function calculates matrix-matrix product S*A. Matrix S must be
-stored in CRS or SKS format (exception will be thrown otherwise).
+#include "stdafx.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include "interpolation.h"
-INPUT PARAMETERS
- S - sparse M*N matrix in CRS or SKS format.
- A - array[N][K], input dense matrix. For performance reasons
- we make only quick checks - we check that array size
- is at least N, but we do not check for NAN's or INF's.
- K - number of columns of matrix (A).
- B - output buffer, possibly preallocated. In case buffer
- size is too small to store result, this buffer is
- automatically resized.
+using namespace alglib;
+
+
+int main(int argc, char **argv)
+{
+ //
+ // Spline built with spline1dbuildcubic() can be non-monotone even when
+ // Y-values form a monotone sequence. Say, for x=[0,1,2] and y=[0,1,1]
+ // cubic spline will monotonically grow until x=1.5 and then start
+ // decreasing.
+ //
+ // That's why ALGLIB provides special spline construction function
+ // which builds a spline which preserves monotonicity of the original
+ // dataset.
+ //
+ // NOTE: in case original dataset is non-monotonic, ALGLIB splits it
+ // into monotone subsequences and builds piecewise monotonic spline.
+ //
+ real_1d_array x = "[0,1,2]";
+ real_1d_array y = "[0,1,1]";
+ spline1dinterpolant s;
-OUTPUT PARAMETERS
- B - array[M][K], S*A
+ // build spline
+ spline1dbuildmonotone(x, y, s);
-NOTE: this function throws exception when called for non-CRS/SKS matrix.
-You must convert your matrix with SparseConvertToCRS/SKS() before using
-this function.
+ // calculate S at x = [-0.5, 0.0, 0.5, 1.0, 1.5, 2.0]
+ // you may see that spline is really monotonic
+ double v;
+ v = spline1dcalc(s, -0.5);
+ printf("%.4f\n", double(v)); // EXPECTED: 0.0000
+ v = spline1dcalc(s, 0.0);
+ printf("%.4f\n", double(v)); // EXPECTED: 0.0000
+ v = spline1dcalc(s, +0.5);
+ printf("%.4f\n", double(v)); // EXPECTED: 0.5000
+ v = spline1dcalc(s, 1.0);
+ printf("%.4f\n", double(v)); // EXPECTED: 1.0000
+ v = spline1dcalc(s, 1.5);
+ printf("%.4f\n", double(v)); // EXPECTED: 1.0000
+ v = spline1dcalc(s, 2.0);
+ printf("%.4f\n", double(v)); // EXPECTED: 1.0000
+ return 0;
+}
- -- ALGLIB PROJECT --
- Copyright 14.10.2011 by Bochkanov Sergey
-*************************************************************************/
-
    void alglib::sparsemm( - sparsematrix s, - real_2d_array a, - ae_int_t k, - real_2d_array& b); -
    - + + +
     
    /************************************************************************* -This function simultaneously calculates two matrix-matrix products: - S*A and S^T*A. -S must be square (non-rectangular) matrix stored in CRS or SKS format -(exception will be thrown otherwise). - -INPUT PARAMETERS - S - sparse N*N matrix in CRS or SKS format. - A - array[N][K], input dense matrix. For performance reasons - we make only quick checks - we check that array size is - at least N, but we do not check for NAN's or INF's. - K - number of columns of matrix (A). - B0 - output buffer, possibly preallocated. In case buffer - size is too small to store result, this buffer is - automatically resized. - B1 - output buffer, possibly preallocated. In case buffer - size is too small to store result, this buffer is - automatically resized. - -OUTPUT PARAMETERS - B0 - array[N][K], S*A - B1 - array[N][K], S^T*A - -NOTE: this function throws exception when called for non-CRS/SKS matrix. -You must convert your matrix with SparseConvertToCRS/SKS() before using -this function. - - -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey +Nonlinear least squares solver used to fit 2D splines to data *************************************************************************/ -
    void alglib::sparsemm2( - sparsematrix s, - real_2d_array a, - ae_int_t k, - real_2d_array& b0, - real_2d_array& b1); +
    class spline2dbuilder +{ +};
    - +
     
    /************************************************************************* -This function calculates matrix-matrix product S^T*A. Matrix S must be -stored in CRS or SKS format (exception will be thrown otherwise). - -INPUT PARAMETERS - S - sparse M*N matrix in CRS or SKS format. - A - array[M][K], input dense matrix. For performance reasons - we make only quick checks - we check that array size is - at least M, but we do not check for NAN's or INF's. - K - number of columns of matrix (A). - B - output buffer, possibly preallocated. In case buffer - size is too small to store result, this buffer is - automatically resized. - -OUTPUT PARAMETERS - B - array[N][K], S^T*A - -NOTE: this function throws exception when called for non-CRS/SKS matrix. -You must convert your matrix with SparseConvertToCRS/SKS() before using -this function. - - -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey +Spline 2D fitting report: + rmserror RMS error + avgerror average error + maxerror maximum error + r2 coefficient of determination, R-squared, 1-RSS/TSS *************************************************************************/ -
    void alglib::sparsemtm( - sparsematrix s, - real_2d_array a, - ae_int_t k, - real_2d_array& b); +
    class spline2dfitreport +{ + double rmserror; + double avgerror; + double maxerror; + double r2; +};
    - +
     
/*************************************************************************
-This function calculates matrix-vector product S^T*x. Matrix S must be
-stored in CRS or SKS format (exception will be thrown otherwise).
-
-INPUT PARAMETERS
- S - sparse M*N matrix in CRS or SKS format.
- X - array[M], input vector. For performance reasons we
- make only quick checks - we check that array size is
- at least M, but we do not check for NAN's or INF's.
- Y - output buffer, possibly preallocated. In case buffer
- size is too small to store result, this buffer is
- automatically resized.
-
-OUTPUT PARAMETERS
- Y - array[N], S^T*x
-
-NOTE: this function throws exception when called for non-CRS/SKS matrix.
-You must convert your matrix with SparseConvertToCRS/SKS() before using
-this function.
-
- -- ALGLIB PROJECT --
- Copyright 14.10.2011 by Bochkanov Sergey
+2-dimensional spline interpolant
*************************************************************************/
    void alglib::sparsemtv(sparsematrix s, real_1d_array x, real_1d_array& y); +
    class spline2dinterpolant +{ +};
    - +
     
    /************************************************************************* -This function calculates matrix-vector product S*x. Matrix S must be -stored in CRS or SKS format (exception will be thrown otherwise). - -INPUT PARAMETERS - S - sparse M*N matrix in CRS or SKS format. - X - array[N], input vector. For performance reasons we - make only quick checks - we check that array size is - at least N, but we do not check for NAN's or INF's. - Y - output buffer, possibly preallocated. In case buffer - size is too small to store result, this buffer is - automatically resized. - -OUTPUT PARAMETERS - Y - array[M], S*x +This subroutine was deprecated in ALGLIB 3.6.0 -NOTE: this function throws exception when called for non-CRS/SKS matrix. -You must convert your matrix with SparseConvertToCRS/SKS() before using -this function. +We recommend you to switch to Spline2DBuildBicubicV(), which is more +flexible and accepts its arguments in more convenient order. -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey + Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsemv(sparsematrix s, real_1d_array x, real_1d_array& y); +
    void alglib::spline2dbuildbicubic( + real_1d_array x, + real_1d_array y, + real_2d_array f, + ae_int_t m, + ae_int_t n, + spline2dinterpolant& c, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - +
     
    /************************************************************************* -This function simultaneously calculates two matrix-vector products: - S*x and S^T*x. -S must be square (non-rectangular) matrix stored in CRS or SKS format -(exception will be thrown otherwise). - -INPUT PARAMETERS - S - sparse N*N matrix in CRS or SKS format. - X - array[N], input vector. For performance reasons we - make only quick checks - we check that array size is - at least N, but we do not check for NAN's or INF's. - Y0 - output buffer, possibly preallocated. In case buffer - size is too small to store result, this buffer is - automatically resized. - Y1 - output buffer, possibly preallocated. In case buffer - size is too small to store result, this buffer is - automatically resized. +This subroutine builds bicubic vector-valued spline. -OUTPUT PARAMETERS - Y0 - array[N], S*x - Y1 - array[N], S^T*x +Input parameters: + X - spline abscissas, array[0..N-1] + Y - spline ordinates, array[0..M-1] + F - function values, array[0..M*N*D-1]: + * first D elements store D values at (X[0],Y[0]) + * next D elements store D values at (X[1],Y[0]) + * general form - D function values at (X[i],Y[j]) are stored + at F[D*(J*N+I)...D*(J*N+I)+D-1]. + M,N - grid size, M>=2, N>=2 + D - vector dimension, D>=1 -NOTE: this function throws exception when called for non-CRS/SKS matrix. -You must convert your matrix with SparseConvertToCRS/SKS() before using -this function. +Output parameters: + C - spline interpolant -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey + Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsemv2( - sparsematrix s, +
    void alglib::spline2dbuildbicubicv( real_1d_array x, - real_1d_array& y0, - real_1d_array& y1); + ae_int_t n, + real_1d_array y, + ae_int_t m, + real_1d_array f, + ae_int_t d, + spline2dinterpolant& c, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
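The packed F layout is the only non-obvious part of the vector-valued builders. The sketch below is illustrative only and is not an upstream example: the 3x3 grid and the two components f0(x,y)=x+y, f1(x,y)=x*y are chosen here. It stores the D values for node (X[i],Y[j]) at F[D*(J*N+I)...D*(J*N+I)+D-1] exactly as described above, builds the spline with spline2dbuildbicubicv(), and reads the result back with spline2dcalcv() and spline2dcalcvi().

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // 3x3 grid, 2-component function: f0(x,y)=x+y, f1(x,y)=x*y
    real_1d_array x = "[0,1,2]";
    real_1d_array y = "[0,1,2]";
    ae_int_t n = 3, m = 3, d = 2;

    // D values at (X[i],Y[j]) are stored at F[D*(J*N+I)...D*(J*N+I)+D-1]
    real_1d_array f;
    f.setlength(m*n*d);
    for(ae_int_t j=0; j<m; j++)
        for(ae_int_t i=0; i<n; i++)
        {
            f[d*(j*n+i)+0] = x[i]+y[j];
            f[d*(j*n+i)+1] = x[i]*y[j];
        }

    spline2dinterpolant c;
    spline2dbuildbicubicv(x, n, y, m, f, d, c);

    // evaluate both components at once, then a single component by index
    real_1d_array v;
    spline2dcalcv(c, 1.5, 0.5, v);
    printf("%.3f %.3f\n", double(v[0]), double(v[1]));
    printf("%.3f\n", double(spline2dcalcvi(c, 1.5, 0.5, 1)));
    return 0;
}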
     
    /************************************************************************* -This procedure resizes Hash-Table matrix. It can be called when you have -deleted too many elements from the matrix, and you want to free unneeded -memory. +This subroutine was deprecated in ALGLIB 3.6.0 + +We recommend you to switch to Spline2DBuildBilinearV(), which is more +flexible and accepts its arguments in more convenient order. -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey + Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparseresizematrix(sparsematrix s); +
    void alglib::spline2dbuildbilinear( + real_1d_array x, + real_1d_array y, + real_2d_array f, + ae_int_t m, + ae_int_t n, + spline2dinterpolant& c, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function rewrites existing (non-zero) element. It returns True if -element exists or False, when it is called for non-existing (zero) -element. - -This function works with any kind of the matrix. - -The purpose of this function is to provide convenient thread-safe way to -modify sparse matrix. Such modification (already existing element is -rewritten) is guaranteed to be thread-safe without any synchronization, as -long as different threads modify different elements. +This subroutine builds bilinear vector-valued spline. -INPUT PARAMETERS - S - sparse M*N matrix in any kind of representation - (Hash, SKS, CRS). - I - row index of non-zero element to modify, 0<=I<M - J - column index of non-zero element to modify, 0<=J<N - V - value to rewrite, must be finite number +Input parameters: + X - spline abscissas, array[0..N-1] + Y - spline ordinates, array[0..M-1] + F - function values, array[0..M*N*D-1]: + * first D elements store D values at (X[0],Y[0]) + * next D elements store D values at (X[1],Y[0]) + * general form - D function values at (X[i],Y[j]) are stored + at F[D*(J*N+I)...D*(J*N+I)+D-1]. + M,N - grid size, M>=2, N>=2 + D - vector dimension, D>=1 -OUTPUT PARAMETERS - S - modified matrix -RESULT - True in case when element exists - False in case when element doesn't exist or it is zero +Output parameters: + C - spline interpolant -- ALGLIB PROJECT -- - Copyright 14.03.2012 by Bochkanov Sergey + Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::sparserewriteexisting( - sparsematrix s, - ae_int_t i, - ae_int_t j, - double v); +
    void alglib::spline2dbuildbilinearv( + real_1d_array x, + ae_int_t n, + real_1d_array y, + ae_int_t m, + real_1d_array f, + ae_int_t d, + spline2dinterpolant& c, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -This function modifies S[i,j] - element of the sparse matrix. +This subroutine creates least squares solver used to fit 2D splines to +irregularly sampled (scattered) data. -For Hash-based storage format: -* this function can be called at any moment - during matrix initialization - or later -* new value can be zero or non-zero. In case new value of S[i,j] is zero, - this element is deleted from the table. -* this function has no effect when called with zero V for non-existent - element. +Solver object is used to perform spline fits as follows: +* solver object is created with spline2dbuildercreate() function +* dataset is added with spline2dbuildersetpoints() function +* fit area is chosen: + * spline2dbuildersetarea() - for user-defined area + * spline2dbuildersetareaauto() - for automatically chosen area +* number of grid nodes is chosen with spline2dbuildersetgrid() +* prior term is chosen with one of the following functions: + * spline2dbuildersetlinterm() to set linear prior + * spline2dbuildersetconstterm() to set constant prior + * spline2dbuildersetzeroterm() to set zero prior + * spline2dbuildersetuserterm() to set user-defined constant prior +* solver algorithm is chosen with either: + * spline2dbuildersetalgoblocklls() - BlockLLS algorithm, medium-scale problems + * spline2dbuildersetalgofastddm() - FastDDM algorithm, large-scale problems +* finally, fitting itself is performed with spline2dfit() function. -For CRS-bases storage format: -* this function can be called ONLY DURING MATRIX INITIALIZATION -* new value MUST be non-zero. Exception will be thrown for zero V. -* elements must be initialized in correct order - from top row to bottom, - within row - from left to right. +Most of the steps above can be omitted, solver is configured with good +defaults. The minimum is to call: +* spline2dbuildercreate() to create solver object +* spline2dbuildersetpoints() to specify dataset +* spline2dbuildersetgrid() to tell how many nodes you need +* spline2dfit() to perform fit -For SKS storage: NOT SUPPORTED! Use SparseRewriteExisting() to work with -SKS matrices. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -INPUT PARAMETERS - S - sparse M*N matrix in Hash-Table or CRS representation. - I - row index of the element to modify, 0<=I<M - J - column index of the element to modify, 0<=J<N - V - value to set, must be finite number, can be zero +INPUT PARAMETERS: + D - positive number, number of Y-components: D=1 for simple scalar + fit, D>1 for vector-valued spline fitting. -OUTPUT PARAMETERS - S - modified matrix +OUTPUT PARAMETERS: + S - solver object -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey + Copyright 29.01.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparseset(sparsematrix s, ae_int_t i, ae_int_t j, double v); +
    void alglib::spline2dbuildercreate( + ae_int_t d, + spline2dbuilder& state, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - +

    Examples:   [1]  

    +
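The sketch below walks through the minimal workflow listed above. It is illustrative only and is not an upstream example: the 25-point sample of f(x,y)=x+y, the 5x5 grid, the LambdaNS value and the variable names are all chosen here, and the BlockLLS solver is selected explicitly (spline2dbuildersetalgofastddm() would be the choice for much larger problems).

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // Scattered samples of f(x,y)=x+y on [0,1]x[0,1]:
    // one row per point, [x, y, f] (2 coordinates + D=1 value).
    real_2d_array xy;
    ae_int_t npoints = 25;
    xy.setlength(npoints, 3);
    for(ae_int_t k=0; k<npoints; k++)
    {
        double px = (k%5)/4.0;
        double py = (k/5)/4.0;
        xy[k][0] = px;
        xy[k][1] = py;
        xy[k][2] = px+py;
    }

    // builder workflow: create -> set points -> set grid -> choose solver -> fit
    spline2dbuilder state;
    spline2dinterpolant s;
    spline2dfitreport rep;
    spline2dbuildercreate(1, state);              // D=1, scalar fit
    spline2dbuildersetpoints(state, xy, npoints);
    spline2dbuildersetgrid(state, 5, 5);          // 5x5 nodes on the fit area
    spline2dbuildersetalgoblocklls(state, 0.001); // small nonsmoothness penalty
    spline2dfit(state, s, rep);

    // evaluate the fitted spline and inspect the report
    printf("%.3f\n", double(spline2dcalc(s, 0.25, 0.75)));
    printf("rms=%.6f\n", double(rep.rmserror));
    return 0;
}

Because the data are exactly linear, the fitted spline should reproduce them closely and the report fields (rmserror, r2) should reflect an almost perfect fit.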
     
    /************************************************************************* -This function calculates matrix-matrix product S*A, when S is symmetric -matrix. Matrix S must be stored in CRS or SKS format (exception will be -thrown otherwise). - -INPUT PARAMETERS - S - sparse M*M matrix in CRS or SKS format. - IsUpper - whether upper or lower triangle of S is given: - * if upper triangle is given, only S[i,j] for j>=i - are used, and lower triangle is ignored (it can be - empty - these elements are not referenced at all). - * if lower triangle is given, only S[i,j] for j<=i - are used, and upper triangle is ignored. - A - array[N][K], input dense matrix. For performance reasons - we make only quick checks - we check that array size is - at least N, but we do not check for NAN's or INF's. - K - number of columns of matrix (A). - B - output buffer, possibly preallocated. In case buffer - size is too small to store result, this buffer is - automatically resized. - -OUTPUT PARAMETERS - B - array[M][K], S*A - -NOTE: this function throws exception when called for non-CRS/SKS matrix. -You must convert your matrix with SparseConvertToCRS/SKS() before using -this function. +This function allows you to choose least squares solver used to perform +fitting. This function sets solver algorithm to "BlockLLS", which performs +least squares fitting with fast sparse direct solver, with optional +nonsmoothness penalty being applied. - -- ALGLIB PROJECT -- - Copyright 14.10.2011 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::sparsesmm( - sparsematrix s, - bool isupper, - real_2d_array a, - ae_int_t k, - real_2d_array& b); +Nonlinearity penalty has the following form: -
    - -
    -
/*************************************************************************
-This function calculates matrix-vector product S*x, when S is symmetric
-matrix. Matrix S must be stored in CRS or SKS format (exception will be
-thrown otherwise).
+ [ ]
+ P() ~ Lambda* integral[ (d2S/dx2)^2 + 2*(d2S/dxdy)^2 + (d2S/dy2)^2 ]dxdy
+ [ ]
-INPUT PARAMETERS
- S - sparse M*M matrix in CRS or SKS format.
- IsUpper - whether upper or lower triangle of S is given:
- * if upper triangle is given, only S[i,j] for j>=i
- are used, and lower triangle is ignored (it can be
- empty - these elements are not referenced at all).
- * if lower triangle is given, only S[i,j] for j<=i
- are used, and upper triangle is ignored.
- X - array[N], input vector. For performance reasons we
- make only quick checks - we check that array size is
- at least N, but we do not check for NAN's or INF's.
- Y - output buffer, possibly preallocated. In case buffer
- size is too small to store result, this buffer is
- automatically resized.
+here integral is calculated over entire grid, and "~" means "proportional"
+because integral is normalized after calculation. Extremely large values
+of Lambda result in linear fit being performed.
-OUTPUT PARAMETERS
- Y - array[M], S*x
+NOTE: this algorithm is the most robust and controllable one, but it is
+ limited by 512x512 grids and (say) up to 1,000,000 points. However,
+ ALGLIB has one more spline solver: FastDDM algorithm, which is
+ intended for really large-scale problems (in 10M-100M range). FastDDM
+ algorithm also has better parallelism properties.
-NOTE: this function throws exception when called for non-CRS/SKS matrix.
-You must convert your matrix with SparseConvertToCRS/SKS() before using
-this function.
+More information on BlockLLS solver:
+* memory requirements: ~[32*K^3+256*NPoints] bytes for KxK grid with
+ NPoints-sized dataset
+* serial running time: O(K^4+NPoints)
+* parallelism potential: limited. You may get some sublinear gain when
+ working with large grids (K's in 256..512 range)
- -- ALGLIB PROJECT --
- Copyright 14.10.2011 by Bochkanov Sergey
-*************************************************************************/
-
    void alglib::sparsesmv( - sparsematrix s, - bool isupper, - real_1d_array x, - real_1d_array& y); + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -
    - -
    -
    /************************************************************************* -This function efficiently swaps contents of S0 and S1. +INPUT PARAMETERS: + S - spline 2D builder object + LambdaNS- non-negative value: + * positive value means that some smoothing is applied + * zero value means that no smoothing is applied, and + corresponding entries of design matrix are numerically + zero and dropped from consideration. - -- ALGLIB PROJECT -- - Copyright 16.01.2014 by Bochkanov Sergey + -- ALGLIB -- + Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparseswap(sparsematrix s0, sparsematrix s1); +
    void alglib::spline2dbuildersetalgoblocklls( + spline2dbuilder state, + double lambdans, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function performs efficient in-place transpose of SKS matrix. No -additional memory is allocated during transposition. - -This function supports only skyline storage format (SKS). +This function allows you to choose least squares solver used to perform +fitting. This function sets solver algorithm to "FastDDM", which performs +fast parallel fitting by splitting problem into smaller chunks and merging +results together. -INPUT PARAMETERS - S - sparse matrix in SKS format. - -OUTPUT PARAMETERS - S - sparse matrix, transposed. +This solver is optimized for large-scale problems, starting from 256x256 +grids, and up to 10000x10000 grids. Of course, it will work for smaller +grids too. - -- ALGLIB PROJECT -- - Copyright 16.01.2014 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::sparsetransposesks(sparsematrix s); +More detailed description of the algorithm is given below: +* algorithm generates hierarchy of nested grids, ranging from ~16x16 + (topmost "layer" of the model) to ~KX*KY one (final layer). Upper layers + model global behavior of the function, lower layers are used to model + fine details. Moving from layer to layer doubles grid density. +* fitting is started from topmost layer, subsequent layers are fitted + using residuals from previous ones. +* user may choose to skip generation of upper layers and generate only a + few bottom ones, which will result in much better performance and + parallelization efficiency, at the cost of algorithm inability to "patch" + large holes in the dataset. +* every layer is regularized using progressively increasing regularization + coefficient; thus, increasing LambdaV penalizes fine details first, + leaving lower frequencies almost intact for a while. +* after fitting is done, all layers are merged together into one bicubic + spline -
    - -
    -
    /************************************************************************* -This function calculates matrix-vector product op(S)*x, when x is vector, -S is symmetric triangular matrix, op(S) is transposition or no operation. -Matrix S must be stored in CRS or SKS format (exception will be thrown -otherwise). +IMPORTANT: regularization coefficient used by this solver is different + from the one used by BlockLLS. Latter utilizes nonlinearity + penalty, which is global in nature (large regularization + results in global linear trend being extracted); this solver + uses another, localized form of penalty, which is suitable for + parallel processing. -INPUT PARAMETERS - S - sparse square matrix in CRS or SKS format. - IsUpper - whether upper or lower triangle of S is used: - * if upper triangle is given, only S[i,j] for j>=i - are used, and lower triangle is ignored (it can be - empty - these elements are not referenced at all). - * if lower triangle is given, only S[i,j] for j<=i - are used, and upper triangle is ignored. - IsUnit - unit or non-unit diagonal: - * if True, diagonal elements of triangular matrix are - considered equal to 1.0. Actual elements stored in - S are not referenced at all. - * if False, diagonal stored in S is used - OpType - operation type: - * if 0, S*x is calculated - * if 1, (S^T)*x is calculated (transposition) - X - array[N] which stores input vector. For performance - reasons we make only quick checks - we check that - array size is at least N, but we do not check for - NAN's or INF's. - Y - possibly preallocated input buffer. Automatically - resized if its size is too small. +Notes on memory and performance: +* memory requirements: most memory is consumed during modeling of the + higher layers; ~[512*NPoints] bytes is required for a model with full + hierarchy of grids being generated. However, if you skip a few topmost + layers, you will get nearly constant (wrt. points count and grid size) + memory consumption. +* serial running time: O(K*K)+O(NPoints) for a KxK grid +* parallelism potential: good. You may get nearly linear speed-up when + performing fitting with just a few layers. Adding more layers results in + model becoming more global, which somewhat reduces efficiency of the + parallel code. -OUTPUT PARAMETERS - Y - array[N], op(S)*x + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -NOTE: this function throws exception when called for non-CRS/SKS matrix. -You must convert your matrix with SparseConvertToCRS/SKS() before using -this function. 
+INPUT PARAMETERS:
+ S - spline 2D builder object
+ NLayers - number of layers in the model:
+ * NLayers>=1 means that up to chosen number of bottom
+ layers are fitted
+ * NLayers=0 means that maximum number of layers is chosen
+ (according to current grid size)
+ * NLayers<=-1 means that up to |NLayers| topmost layers are
+ skipped
+ Recommendations:
+ * good "default" value is 2 layers
+ * you may need more layers, if your dataset is very
+ irregular and you want to "patch" large holes. For a
+ grid step H (equal to AreaWidth/GridSize) you may expect
+ that the last layer reproduces variations at distance H (and
+ can patch holes that wide), while higher layers operate
+ at distances 2*H, 4*H, 8*H and so on.
+ * good value for "bulletproof" mode is NLayers=0, which
+ results in complete hierarchy of layers being generated.
+ LambdaV - regularization coefficient, chosen in such a way that it
+ penalizes bottom layers (fine details) first.
+ LambdaV>=0, zero value means that no penalty is applied.
- -- ALGLIB PROJECT --
- Copyright 20.01.2014 by Bochkanov Sergey
+ -- ALGLIB --
+ Copyright 05.02.2018 by Bochkanov Sergey
*************************************************************************/
-
    void alglib::sparsetrmv( - sparsematrix s, - bool isupper, - bool isunit, - ae_int_t optype, - real_1d_array& x, - real_1d_array& y); +
    void alglib::spline2dbuildersetalgofastddm( + spline2dbuilder state, + ae_int_t nlayers, + double lambdav, + const xparams _params = alglib::xdefault);
    - +
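As a complement to the BlockLLS sketch above, the following illustrative example (again not an upstream one; the synthetic 400-point dataset, the 16x16 grid, NLayers=2 and LambdaV=1.0E-3 are chosen here) shows where the two FastDDM-specific parameters enter the builder workflow.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    // 20x20 regular samples of a smooth test function on [0,1]x[0,1],
    // packed as [x, y, f] rows (2 coordinates + D=1 value per row)
    ae_int_t gridn = 20;
    ae_int_t npoints = gridn*gridn;
    real_2d_array xy;
    xy.setlength(npoints, 3);
    for(ae_int_t j=0; j<gridn; j++)
        for(ae_int_t i=0; i<gridn; i++)
        {
            double px = double(i)/(gridn-1);
            double py = double(j)/(gridn-1);
            xy[j*gridn+i][0] = px;
            xy[j*gridn+i][1] = py;
            xy[j*gridn+i][2] = sin(4*px)+cos(3*py);
        }

    spline2dbuilder state;
    spline2dinterpolant s;
    spline2dfitreport rep;
    spline2dbuildercreate(1, state);
    spline2dbuildersetpoints(state, xy, npoints);
    spline2dbuildersetgrid(state, 16, 16);

    // FastDDM: NLayers=2 (recommended default), mild LambdaV regularization
    spline2dbuildersetalgofastddm(state, 2, 1.0E-3);

    spline2dfit(state, s, rep);
    printf("r2=%.4f\n", double(rep.r2));
    return 0;
}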
     
    /************************************************************************* -This function solves linear system op(S)*y=x where x is vector, S is -symmetric triangular matrix, op(S) is transposition or no operation. -Matrix S must be stored in CRS or SKS format (exception will be thrown -otherwise). +This function allows you to choose least squares solver used to perform +fitting. This function sets solver algorithm to "NaiveLLS". -INPUT PARAMETERS - S - sparse square matrix in CRS or SKS format. - IsUpper - whether upper or lower triangle of S is used: - * if upper triangle is given, only S[i,j] for j>=i - are used, and lower triangle is ignored (it can be - empty - these elements are not referenced at all). - * if lower triangle is given, only S[i,j] for j<=i - are used, and upper triangle is ignored. - IsUnit - unit or non-unit diagonal: - * if True, diagonal elements of triangular matrix are - considered equal to 1.0. Actual elements stored in - S are not referenced at all. - * if False, diagonal stored in S is used. It is your - responsibility to make sure that diagonal is - non-zero. - OpType - operation type: - * if 0, S*x is calculated - * if 1, (S^T)*x is calculated (transposition) - X - array[N] which stores input vector. For performance - reasons we make only quick checks - we check that - array size is at least N, but we do not check for - NAN's or INF's. +IMPORTANT: NaiveLLS is NOT intended to be used in real life code! This + algorithm solves problem by generated dense (K^2)x(K^2+NPoints) + matrix and solves linear least squares problem with dense + solver. -OUTPUT PARAMETERS - X - array[N], inv(op(S))*x + It is here just to test BlockLLS against reference solver + (and maybe for someone trying to compare well optimized solver + against straightforward approach to the LLS problem). -NOTE: this function throws exception when called for non-CRS/SKS matrix. - You must convert your matrix with SparseConvertToCRS/SKS() before - using this function. +More information on naive LLS solver: +* memory requirements: ~[8*K^4+256*NPoints] bytes for KxK grid. +* serial running time: O(K^6+NPoints) for KxK grid +* when compared with BlockLLS, NaiveLLS has ~K larger memory demand and + ~K^2 larger running time. -NOTE: no assertion or tests are done during algorithm operation. It is - your responsibility to provide invertible matrix to algorithm. +INPUT PARAMETERS: + S - spline 2D builder object + LambdaNS- nonsmoothness penalty - -- ALGLIB PROJECT -- - Copyright 20.01.2014 by Bochkanov Sergey + -- ALGLIB -- + Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::sparsetrsv( - sparsematrix s, - bool isupper, - bool isunit, - ae_int_t optype, - real_1d_array& x); +
    void alglib::spline2dbuildersetalgonaivells( + spline2dbuilder state, + double lambdans, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This function calculates vector-matrix-vector product x'*S*x, where S is -symmetric matrix. Matrix S must be stored in CRS or SKS format (exception -will be thrown otherwise). +This function sets area where 2D spline interpolant is built to +user-defined one: [XA,XB]*[YA,YB] -INPUT PARAMETERS - S - sparse M*M matrix in CRS or SKS format. - IsUpper - whether upper or lower triangle of S is given: - * if upper triangle is given, only S[i,j] for j>=i - are used, and lower triangle is ignored (it can be - empty - these elements are not referenced at all). - * if lower triangle is given, only S[i,j] for j<=i - are used, and upper triangle is ignored. - X - array[N], input vector. For performance reasons we - make only quick checks - we check that array size is - at least N, but we do not check for NAN's or INF's. +INPUT PARAMETERS: + S - spline 2D builder object + XA,XB - spatial extent in the first (X) dimension, XA<XB + YA,YB - spatial extent in the second (Y) dimension, YA<YB -RESULT - x'*S*x + -- ALGLIB -- + Copyright 05.02.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2dbuildersetarea( + spline2dbuilder state, + double xa, + double xb, + double ya, + double yb, + const xparams _params = alglib::xdefault); -NOTE: this function throws exception when called for non-CRS/SKS matrix. -You must convert your matrix with SparseConvertToCRS/SKS() before using -this function. +
    + +
    +
    /************************************************************************* +This function sets area where 2D spline interpolant is built. "Auto" means +that area extent is determined automatically from dataset extent. - -- ALGLIB PROJECT -- - Copyright 27.01.2014 by Bochkanov Sergey +INPUT PARAMETERS: + S - spline 2D builder object + + -- ALGLIB -- + Copyright 05.02.2018 by Bochkanov Sergey *************************************************************************/ -
    double alglib::sparsevsmv(sparsematrix s, bool isupper, real_1d_array x); +
    void alglib::spline2dbuildersetareaauto( + spline2dbuilder state, + const xparams _params = alglib::xdefault);
    - +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    -
    -using namespace alglib;
    +
    /************************************************************************* +This function sets constant prior term (model is a sum of bicubic spline +and global prior, which can be linear, constant, user-defined constant or +zero). +Constant prior term is determined by least squares fitting. -int main(int argc, char **argv) -{ - // - // This example demonstrates creation/initialization of the sparse matrix - // and matrix-vector multiplication. - // - // First, we have to create matrix and initialize it. Matrix is initially created - // in the Hash-Table format, which allows convenient initialization. We can modify - // Hash-Table matrix with sparseset() and sparseadd() functions. - // - // NOTE: Unlike CRS format, Hash-Table representation allows you to initialize - // elements in the arbitrary order. You may see that we initialize a[0][0] first, - // then move to the second row, and then move back to the first row. - // - sparsematrix s; - sparsecreate(2, 2, s); - sparseset(s, 0, 0, 2.0); - sparseset(s, 1, 1, 1.0); - sparseset(s, 0, 1, 1.0); +INPUT PARAMETERS: + S - spline builder - sparseadd(s, 1, 1, 4.0); + -- ALGLIB -- + Copyright 01.02.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2dbuildersetconstterm( + spline2dbuilder state, + const xparams _params = alglib::xdefault); - // - // Now S is equal to - // [ 2 1 ] - // [ 5 ] - // Lets check it by reading matrix contents with sparseget(). - // You may see that with sparseget() you may read both non-zero - // and zero elements. - // - double v; - v = sparseget(s, 0, 0); - printf("%.2f\n", double(v)); // EXPECTED: 2.0000 - v = sparseget(s, 0, 1); - printf("%.2f\n", double(v)); // EXPECTED: 1.0000 - v = sparseget(s, 1, 0); - printf("%.2f\n", double(v)); // EXPECTED: 0.0000 - v = sparseget(s, 1, 1); - printf("%.2f\n", double(v)); // EXPECTED: 5.0000 +
    + +
    +
/*************************************************************************
+This function sets nodes count for 2D spline interpolant. Fitting is
+performed on area defined with one of the "setarea" functions; this one
+sets number of nodes placed upon the fitting area.
- //
- // After successful creation we can use our matrix for linear operations.
- //
- // However, there is one more thing we MUST do before using S in linear
- // operations: we have to convert it from HashTable representation (used for
- // initialization and dynamic operations) to CRS format with sparseconverttocrs()
- // call. If you omit this call, ALGLIB will generate exception on the first
- // attempt to use S in linear operations.
- //
- sparseconverttocrs(s);
+INPUT PARAMETERS:
+ S - spline 2D builder object
+ KX - nodes count for the first (X) dimension; fitting interval
+ [XA,XB] is separated into KX-1 subintervals, with KX nodes
+ created at the boundaries.
+ KY - nodes count for the second (Y) dimension; fitting interval
+ [YA,YB] is separated into KY-1 subintervals, with KY nodes
+ created at the boundaries.
- //
- // Now S is in the CRS format and we are ready to do linear operations.
- // Lets calculate A*x for some x.
- //
- real_1d_array x = "[1,-1]";
- real_1d_array y = "[]";
- sparsemv(s, x, y);
- printf("%s\n", y.tostring(2).c_str()); // EXPECTED: [1.000,-5.000]
- return 0;
-}
+NOTE: at least 4 nodes are created in each dimension, so KX and KY are
+ silently increased if needed.
+ -- ALGLIB --
+ Copyright 05.02.2018 by Bochkanov Sergey
+*************************************************************************/
    void alglib::spline2dbuildersetgrid( + spline2dbuilder state, + ae_int_t kx, + ae_int_t ky, + const xparams _params = alglib::xdefault); -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "linalg.h"
    +
    /************************************************************************* +This function sets linear prior term (model is a sum of bicubic spline and +global prior, which can be linear, constant, user-defined constant or +zero). -using namespace alglib; +Linear prior term is determined by least squares fitting. +INPUT PARAMETERS: + S - spline builder -int main(int argc, char **argv) -{ - // - // This example demonstrates creation/initialization of the sparse matrix in the - // CRS format. - // - // Hash-Table format used by default is very convenient (it allows easy - // insertion of elements, automatic memory reallocation), but has - // significant memory and performance overhead. Insertion of one element - // costs hundreds of CPU cycles, and memory consumption is several times - // higher than that of CRS. - // - // When you work with really large matrices and when you can tell in - // advance how many elements EXACTLY you need, it can be beneficial to - // create matrix in the CRS format from the very beginning. - // - // If you want to create matrix in the CRS format, you should: - // * use sparsecreatecrs() function - // * know row sizes in advance (number of non-zero entries in the each row) - // * initialize matrix with sparseset() - another function, sparseadd(), is not allowed - // * initialize elements from left to right, from top to bottom, each - // element is initialized only once. - // - sparsematrix s; - integer_1d_array row_sizes = "[2,2,2,1]"; - sparsecreatecrs(4, 4, row_sizes, s); - sparseset(s, 0, 0, 2.0); - sparseset(s, 0, 1, 1.0); - sparseset(s, 1, 1, 4.0); - sparseset(s, 1, 2, 2.0); - sparseset(s, 2, 2, 3.0); - sparseset(s, 2, 3, 1.0); - sparseset(s, 3, 3, 9.0); + -- ALGLIB -- + Copyright 01.02.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2dbuildersetlinterm( + spline2dbuilder state, + const xparams _params = alglib::xdefault); - // - // Now S is equal to - // [ 2 1 ] - // [ 4 2 ] - // [ 3 1 ] - // [ 9 ] - // - // We should point that we have initialized S elements from left to right, - // from top to bottom. CRS representation does NOT allow you to do so in - // the different order. Try to change order of the sparseset() calls above, - // and you will see that your program generates exception. - // - // We can check it by reading matrix contents with sparseget(). - // However, you should remember that sparseget() is inefficient on - // CRS matrices (it may have to pass through all elements of the row - // until it finds element you need). - // - double v; - v = sparseget(s, 0, 0); - printf("%.2f\n", double(v)); // EXPECTED: 2.0000 - v = sparseget(s, 2, 3); - printf("%.2f\n", double(v)); // EXPECTED: 1.0000 +
    + +
    +
/*************************************************************************
+This subroutine adds dataset to the builder object.
- // you may see that you can read zero elements (which are not stored) with sparseget()
- v = sparseget(s, 3, 2);
- printf("%.2f\n", double(v)); // EXPECTED: 0.0000
+This function overrides results of the previous calls, i.e. multiple calls
+of this function will result in only the last set being added.
- //
- // After successful creation we can use our matrix for linear operations.
- // Lets calculate A*x for some x.
- //
- real_1d_array x = "[1,-1,1,-1]";
- real_1d_array y = "[]";
- sparsemv(s, x, y);
- printf("%s\n", y.tostring(2).c_str()); // EXPECTED: [1.000,-2.000,2.000,-9]
- return 0;
-}
+INPUT PARAMETERS:
+ S - spline 2D builder object
+ XY - points, array[N,2+D]. One row corresponds to one point
+ in the dataset. First 2 elements are coordinates, next
+ D elements are function values. Array may be larger than
+ specified, in this case only leading [N,2+D] elements
+ will be used.
+ N - number of points in the dataset
+ -- ALGLIB --
+ Copyright 05.02.2018 by Bochkanov Sergey
+*************************************************************************/
    void alglib::spline2dbuildersetpoints( + spline2dbuilder state, + real_2d_array xy, + ae_int_t n, + const xparams _params = alglib::xdefault); -
    -
    - -smatrixgevd
    -smatrixgevdreduce
    - - -
    - + +
     
    /************************************************************************* -Algorithm for solving the following generalized symmetric positive-definite -eigenproblem: - A*x = lambda*B*x (1) or - A*B*x = lambda*x (2) or - B*A*x = lambda*x (3). -where A is a symmetric matrix, B - symmetric positive-definite matrix. -The problem is solved by reducing it to an ordinary symmetric eigenvalue -problem. +This function sets constant prior term (model is a sum of bicubic spline +and global prior, which can be linear, constant, user-defined constant or +zero). -Input parameters: - A - symmetric matrix which is given by its upper or lower - triangular part. - Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrices A and B. - IsUpperA - storage format of matrix A. - B - symmetric positive-definite matrix which is given by - its upper or lower triangular part. - Array whose indexes range within [0..N-1, 0..N-1]. - IsUpperB - storage format of matrix B. - ZNeeded - if ZNeeded is equal to: - * 0, the eigenvectors are not returned; - * 1, the eigenvectors are returned. - ProblemType - if ProblemType is equal to: - * 1, the following problem is solved: A*x = lambda*B*x; - * 2, the following problem is solved: A*B*x = lambda*x; - * 3, the following problem is solved: B*A*x = lambda*x. +Constant prior term is determined by least squares fitting. -Output parameters: - D - eigenvalues in ascending order. - Array whose index ranges within [0..N-1]. - Z - if ZNeeded is equal to: - * 0, Z hasn’t changed; - * 1, Z contains eigenvectors. - Array whose indexes range within [0..N-1, 0..N-1]. - The eigenvectors are stored in matrix columns. It should - be noted that the eigenvectors in such problems do not - form an orthogonal system. +INPUT PARAMETERS: + S - spline builder + V - value for user-defined prior -Result: - True, if the problem was solved successfully. - False, if the error occurred during the Cholesky decomposition of matrix - B (the matrix isn’t positive-definite) or during the work of the iterative - algorithm for solving the symmetric eigenproblem. + -- ALGLIB -- + Copyright 01.02.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2dbuildersetuserterm( + spline2dbuilder state, + double v, + const xparams _params = alglib::xdefault); -See also the GeneralizedSymmetricDefiniteEVDReduce subroutine. +
    + +
    +
    /************************************************************************* +This function sets zero prior term (model is a sum of bicubic spline and +global prior, which can be linear, constant, user-defined constant or +zero). + +INPUT PARAMETERS: + S - spline builder -- ALGLIB -- - Copyright 1.28.2006 by Bochkanov Sergey + Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::smatrixgevd( - real_2d_array a, - ae_int_t n, - bool isuppera, - real_2d_array b, - bool isupperb, - ae_int_t zneeded, - ae_int_t problemtype, - real_1d_array& d, - real_2d_array& z); +
    void alglib::spline2dbuildersetzeroterm( + spline2dbuilder state, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -Algorithm for reduction of the following generalized symmetric positive- -definite eigenvalue problem: - A*x = lambda*B*x (1) or - A*B*x = lambda*x (2) or - B*A*x = lambda*x (3) -to the symmetric eigenvalues problem C*y = lambda*y (eigenvalues of this and -the given problems are the same, and the eigenvectors of the given problem -could be obtained by multiplying the obtained eigenvectors by the -transformation matrix x = R*y). - -Here A is a symmetric matrix, B - symmetric positive-definite matrix. +This subroutine calculates the value of the bilinear or bicubic spline at +the given point X. Input parameters: - A - symmetric matrix which is given by its upper or lower - triangular part. - Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrices A and B. - IsUpperA - storage format of matrix A. - B - symmetric positive-definite matrix which is given by - its upper or lower triangular part. - Array whose indexes range within [0..N-1, 0..N-1]. - IsUpperB - storage format of matrix B. - ProblemType - if ProblemType is equal to: - * 1, the following problem is solved: A*x = lambda*B*x; - * 2, the following problem is solved: A*B*x = lambda*x; - * 3, the following problem is solved: B*A*x = lambda*x. - -Output parameters: - A - symmetric matrix which is given by its upper or lower - triangle depending on IsUpperA. Contains matrix C. - Array whose indexes range within [0..N-1, 0..N-1]. - R - upper triangular or low triangular transformation matrix - which is used to obtain the eigenvectors of a given problem - as the product of eigenvectors of C (from the right) and - matrix R (from the left). If the matrix is upper - triangular, the elements below the main diagonal - are equal to 0 (and vice versa). Thus, we can perform - the multiplication without taking into account the - internal structure (which is an easier though less - effective way). - Array whose indexes range within [0..N-1, 0..N-1]. - IsUpperR - type of matrix R (upper or lower triangular). + C - 2D spline object. + Built by spline2dbuildbilinearv or spline2dbuildbicubicv. + X, Y- point Result: - True, if the problem was reduced successfully. - False, if the error occurred during the Cholesky decomposition of - matrix B (the matrix is not positive-definite). + S(x,y) - -- ALGLIB -- - Copyright 1.28.2006 by Bochkanov Sergey + -- ALGLIB PROJECT -- + Copyright 05.07.2007 by Bochkanov Sergey *************************************************************************/ -
    bool alglib::smatrixgevdreduce( - real_2d_array& a, - ae_int_t n, - bool isuppera, - real_2d_array b, - bool isupperb, - ae_int_t problemtype, - real_2d_array& r, - bool& isupperr); +
    double alglib::spline2dcalc( + spline2dinterpolant c, + double x, + double y, + const xparams _params = alglib::xdefault);
    - - - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -1-dimensional spline interpolant +This subroutine calculates bilinear or bicubic vector-valued spline at the +given point (X,Y). + +INPUT PARAMETERS: + C - spline interpolant. + X, Y- point + +OUTPUT PARAMETERS: + F - array[D] which stores function values. F is out-parameter and + it is reallocated after call to this function. In case you + want to reuse previously allocated F, you may use + Spline2DCalcVBuf(), which reallocates F only when it is too + small. + + -- ALGLIB PROJECT -- + Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ -
    class spline1dinterpolant -{ -}; +
    void alglib::spline2dcalcv( + spline2dinterpolant c, + double x, + double y, + real_1d_array& f, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This subroutine builds Akima spline interpolant +This subroutine calculates bilinear or bicubic vector-valued spline at the +given point (X,Y). + +If you need just some specific component of vector-valued spline, you can +use spline2dcalcvi() function. INPUT PARAMETERS: - X - spline nodes, array[0..N-1] - Y - function values, array[0..N-1] - N - points count (optional): - * N>=2 - * if given, only first N points are used to build spline - * if not given, automatically detected from X/Y sizes - (len(X) must be equal to len(Y)) + C - spline interpolant. + X, Y- point + F - output buffer, possibly preallocated array. In case array size + is large enough to store result, it is not reallocated. Array + which is too short will be reallocated OUTPUT PARAMETERS: - C - spline interpolant + F - array[D] (or larger) which stores function values + -- ALGLIB PROJECT -- + Copyright 01.02.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2dcalcvbuf( + spline2dinterpolant c, + double x, + double y, + real_1d_array& f, + const xparams _params = alglib::xdefault); -ORDER OF POINTS +
    + +
    +
    /************************************************************************* +This subroutine calculates specific component of vector-valued bilinear or +bicubic spline at the given point (X,Y). -Subroutine automatically sorts points, so caller may pass unsorted array. +INPUT PARAMETERS: + C - spline interpolant. + X, Y- point + I - component index, in [0,D). An exception is generated for out + of range values. + +RESULT: + value of I-th component -- ALGLIB PROJECT -- - Copyright 24.06.2007 by Bochkanov Sergey + Copyright 01.02.2018 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dbuildakima( - real_1d_array x, - real_1d_array y, - spline1dinterpolant& c); -void alglib::spline1dbuildakima( - real_1d_array x, - real_1d_array y, - ae_int_t n, - spline1dinterpolant& c); +
    double alglib::spline2dcalcvi( + spline2dinterpolant c, + double x, + double y, + ae_int_t i, + const xparams _params = alglib::xdefault); + +
    + +
    +
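The following short program is an illustrative sketch rather than one of the shipped ALGLIB examples: it exercises spline2dcalcvbuf() and spline2dcalcvi(), documented above, on the same 2-component bilinear test data (f0,f1)={x+2*y, 2*x+y} used in the vector-valued bilinear example later in this section.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Illustrative sketch: evaluate a 2-component bilinear spline
    // (f0,f1) = {x+2*y, 2*x+y} with spline2dcalcvbuf() and spline2dcalcvi().
    //
    real_1d_array x = "[0.0, 1.0]";
    real_1d_array y = "[0.0, 1.0]";
    real_1d_array f = "[0.00,0.00, 1.00,2.00, 2.00,1.00, 3.00,3.00]";
    spline2dinterpolant s;
    spline2dbuildbilinearv(x, 2, y, 2, f, 2, s);

    // buffered evaluation: buf is allocated on the first call and
    // reused (not reallocated) on subsequent calls, per the note above
    real_1d_array buf;
    spline2dcalcvbuf(s, 0.1, 0.3, buf);
    printf("%s\n", buf.tostring(4).c_str());   // EXPECTED: [0.700,0.500]
    spline2dcalcvbuf(s, 0.1, 0.3, buf);        // same buffer, no reallocation
    printf("%s\n", buf.tostring(4).c_str());   // EXPECTED: [0.700,0.500]

    // evaluation of a single component (i=1 selects f1 = 2*x+y)
    double v1 = spline2dcalcvi(s, 0.1, 0.3, 1);
    printf("%.4f\n", double(v1));              // EXPECTED: 0.5000
    return 0;
}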
    /************************************************************************* +This subroutine makes the copy of the spline model. + +Input parameters: + C - spline interpolant + +Output parameters: + CC - spline copy + + -- ALGLIB PROJECT -- + Copyright 29.06.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2dcopy( + spline2dinterpolant c, + spline2dinterpolant& cc, + const xparams _params = alglib::xdefault); + +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This subroutine calculates the value of the bilinear or bicubic spline at +the given point X and its derivatives. + +Input parameters: + C - spline interpolant. + X, Y- point + +Output parameters: + F - S(x,y) + FX - dS(x,y)/dX + FY - dS(x,y)/dY + FXY - d2S(x,y)/dXdY + + -- ALGLIB PROJECT -- + Copyright 05.07.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2ddiff( + spline2dinterpolant c, + double x, + double y, + double& f, + double& fx, + double& fy, + double& fxy, + const xparams _params = alglib::xdefault); + +
    + +
    +
    /************************************************************************* +This subroutine calculates value of specific component of bilinear or +bicubic vector-valued spline and its derivatives. + +Input parameters: + C - spline interpolant. + X, Y- point + I - component index, in [0,D) + +Output parameters: + F - S(x,y) + FX - dS(x,y)/dX + FY - dS(x,y)/dY + FXY - d2S(x,y)/dXdY + + -- ALGLIB PROJECT -- + Copyright 05.07.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2ddiffvi( + spline2dinterpolant c, + double x, + double y, + ae_int_t i, + double& f, + double& fx, + double& fy, + double& fxy, + const xparams _params = alglib::xdefault);
    - +
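A similar illustrative sketch (not a shipped example) for spline2ddiffvi(): because both test components are linear in x and y, the first derivatives are constant and the mixed derivative is zero.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Illustrative sketch: derivatives of component 0 (f0 = x+2*y) of a
    // bilinear vector-valued spline, computed with spline2ddiffvi().
    //
    real_1d_array x = "[0.0, 1.0]";
    real_1d_array y = "[0.0, 1.0]";
    real_1d_array f = "[0.00,0.00, 1.00,2.00, 2.00,1.00, 3.00,3.00]";
    spline2dinterpolant s;
    double v, vx, vy, vxy;
    spline2dbuildbilinearv(x, 2, y, 2, f, 2, s);

    spline2ddiffvi(s, 0.1, 0.3, 0, v, vx, vy, vxy);
    printf("%.4f\n", double(v));   // EXPECTED: 0.7000 (f0 at (0.1,0.3))
    printf("%.4f\n", double(vx));  // EXPECTED: 1.0000 (d f0/dx)
    printf("%.4f\n", double(vy));  // EXPECTED: 2.0000 (d f0/dy)
    printf("%.4f\n", double(vxy)); // EXPECTED: 0.0000 (no cross term in f0)
    return 0;
}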
     
    /************************************************************************* -This subroutine builds Catmull-Rom spline interpolant. +This function fits bicubic spline to current dataset, using current area/ +grid and current LLS solver. -INPUT PARAMETERS: - X - spline nodes, array[0..N-1]. - Y - function values, array[0..N-1]. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -OPTIONAL PARAMETERS: - N - points count: - * N>=2 - * if given, only first N points are used to build spline - * if not given, automatically detected from X/Y sizes - (len(X) must be equal to len(Y)) - BoundType - boundary condition type: - * -1 for periodic boundary condition - * 0 for parabolically terminated spline (default) - Tension - tension parameter: - * tension=0 corresponds to classic Catmull-Rom spline (default) - * 0<tension<1 corresponds to more general form - cardinal spline +INPUT PARAMETERS: + State - spline 2D builder object OUTPUT PARAMETERS: - C - spline interpolant - + S - 2D spline, fit result + Rep - fitting report, which provides some additional info about + errors, R2 coefficient and so on. -ORDER OF POINTS + -- ALGLIB -- + Copyright 05.02.2018 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2dfit( + spline2dbuilder state, + spline2dinterpolant& s, + spline2dfitreport& rep, + const xparams _params = alglib::xdefault); -Subroutine automatically sorts points, so caller may pass unsorted array. +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +This subroutine performs linear transformation of the spline. -PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: +Input parameters: + C - spline interpolant. + A, B- transformation coefficients: S2(x,y) = A*S(x,y) + B -Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. -However, this subroutine doesn't require you to specify equal values for -the first and last points - it automatically forces them to be equal by -copying Y[first_point] (corresponds to the leftmost, minimal X[]) to -Y[last_point]. However it is recommended to pass consistent values of Y[], -i.e. to make Y[first_point]=Y[last_point]. +Output parameters: + C - transformed spline -- ALGLIB PROJECT -- - Copyright 23.06.2007 by Bochkanov Sergey + Copyright 30.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dbuildcatmullrom( - real_1d_array x, - real_1d_array y, - spline1dinterpolant& c); -void alglib::spline1dbuildcatmullrom( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t boundtype, - double tension, - spline1dinterpolant& c); +
    void alglib::spline2dlintransf( + spline2dinterpolant c, + double a, + double b, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This subroutine builds cubic spline interpolant. +This subroutine performs linear transformation of the spline argument. -INPUT PARAMETERS: - X - spline nodes, array[0..N-1]. - Y - function values, array[0..N-1]. +Input parameters: + C - spline interpolant + AX, BX - transformation coefficients: x = A*t + B + AY, BY - transformation coefficients: y = A*u + B +Result: + C - transformed spline -OPTIONAL PARAMETERS: - N - points count: - * N>=2 - * if given, only first N points are used to build spline - * if not given, automatically detected from X/Y sizes - (len(X) must be equal to len(Y)) - BoundLType - boundary condition type for the left boundary - BoundL - left boundary condition (first or second derivative, - depending on the BoundLType) - BoundRType - boundary condition type for the right boundary - BoundR - right boundary condition (first or second derivative, - depending on the BoundRType) + -- ALGLIB PROJECT -- + Copyright 30.06.2007 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2dlintransxy( + spline2dinterpolant c, + double ax, + double bx, + double ay, + double by, + const xparams _params = alglib::xdefault); -OUTPUT PARAMETERS: - C - spline interpolant +
    +

    Examples:   [1]  

    + +
    +
    /************************************************************************* +Bicubic spline resampling -ORDER OF POINTS +Input parameters: + A - function values at the old grid, + array[0..OldHeight-1, 0..OldWidth-1] + OldHeight - old grid height, OldHeight>1 + OldWidth - old grid width, OldWidth>1 + NewHeight - new grid height, NewHeight>1 + NewWidth - new grid width, NewWidth>1 -Subroutine automatically sorts points, so caller may pass unsorted array. +Output parameters: + B - function values at the new grid, + array[0..NewHeight-1, 0..NewWidth-1] -SETTING BOUNDARY VALUES: + -- ALGLIB routine -- + 15 May, 2007 + Copyright by Bochkanov Sergey +*************************************************************************/ +
    void alglib::spline2dresamplebicubic( + real_2d_array a, + ae_int_t oldheight, + ae_int_t oldwidth, + real_2d_array& b, + ae_int_t newheight, + ae_int_t newwidth, + const xparams _params = alglib::xdefault); -The BoundLType/BoundRType parameters can have the following values: - * -1, which corresonds to the periodic (cyclic) boundary conditions. - In this case: - * both BoundLType and BoundRType must be equal to -1. - * BoundL/BoundR are ignored - * Y[last] is ignored (it is assumed to be equal to Y[first]). - * 0, which corresponds to the parabolically terminated spline - (BoundL and/or BoundR are ignored). - * 1, which corresponds to the first derivative boundary condition - * 2, which corresponds to the second derivative boundary condition - * by default, BoundType=0 is used +
    + +
    +
    /************************************************************************* +Bilinear spline resampling -PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: +Input parameters: + A - function values at the old grid, + array[0..OldHeight-1, 0..OldWidth-1] + OldHeight - old grid height, OldHeight>1 + OldWidth - old grid width, OldWidth>1 + NewHeight - new grid height, NewHeight>1 + NewWidth - new grid width, NewWidth>1 -Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. -However, this subroutine doesn't require you to specify equal values for -the first and last points - it automatically forces them to be equal by -copying Y[first_point] (corresponds to the leftmost, minimal X[]) to -Y[last_point]. However it is recommended to pass consistent values of Y[], -i.e. to make Y[first_point]=Y[last_point]. +Output parameters: + B - function values at the new grid, + array[0..NewHeight-1, 0..NewWidth-1] - -- ALGLIB PROJECT -- - Copyright 23.06.2007 by Bochkanov Sergey + -- ALGLIB routine -- + 09.07.2007 + Copyright by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dbuildcubic( - real_1d_array x, - real_1d_array y, - spline1dinterpolant& c); -void alglib::spline1dbuildcubic( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t boundltype, - double boundl, - ae_int_t boundrtype, - double boundr, - spline1dinterpolant& c); +
    void alglib::spline2dresamplebilinear( + real_2d_array a, + ae_int_t oldheight, + ae_int_t oldwidth, + real_2d_array& b, + ae_int_t newheight, + ae_int_t newwidth, + const xparams _params = alglib::xdefault);
    - +
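The resampling routines take and return plain grids of values rather than spline objects. The sketch below (not a shipped example) resamples a 2x2 grid to 3x3 with spline2dresamplebilinear(); the expected output assumes that the old and the new grid are equidistant over the same rectangle.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Illustrative sketch: bilinear resampling of a 2x2 grid onto a 3x3 grid.
    // Both grids are assumed to be equidistant over the same rectangle.
    //
    real_2d_array a = "[[0.0,1.0],[2.0,3.0]]";
    real_2d_array b;
    spline2dresamplebilinear(a, 2, 2, b, 3, 3);

    // expected (bilinear interpolation of the corner values, under the
    // grid assumption above):
    // [[0.0,0.5,1.0],[1.0,1.5,2.0],[2.0,2.5,3.0]]
    printf("%s\n", b.tostring(1).c_str());
    return 0;
}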
     
    /************************************************************************* -This subroutine builds Hermite spline interpolant. - -INPUT PARAMETERS: - X - spline nodes, array[0..N-1] - Y - function values, array[0..N-1] - D - derivatives, array[0..N-1] - N - points count (optional): - * N>=2 - * if given, only first N points are used to build spline - * if not given, automatically detected from X/Y sizes - (len(X) must be equal to len(Y)) - -OUTPUT PARAMETERS: - C - spline interpolant. - +This function serializes data structure to string. -ORDER OF POINTS +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. + But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. +*************************************************************************/ +
    void spline2dserialize(spline2dinterpolant &obj, std::string &s_out); +void spline2dserialize(spline2dinterpolant &obj, std::ostream &s_out); +
    + +
    +
    /************************************************************************* +This subroutine was deprecated in ALGLIB 3.6.0 -Subroutine automatically sorts points, so caller may pass unsorted array. +We recommend you to switch to Spline2DUnpackV(), which is more flexible +and accepts its arguments in more convenient order. -- ALGLIB PROJECT -- - Copyright 23.06.2007 by Bochkanov Sergey + Copyright 29.06.2007 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dbuildhermite( - real_1d_array x, - real_1d_array y, - real_1d_array d, - spline1dinterpolant& c); -void alglib::spline1dbuildhermite( - real_1d_array x, - real_1d_array y, - real_1d_array d, - ae_int_t n, - spline1dinterpolant& c); +
    void alglib::spline2dunpack( + spline2dinterpolant c, + ae_int_t& m, + ae_int_t& n, + real_2d_array& tbl, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine builds linear spline interpolant - -INPUT PARAMETERS: - X - spline nodes, array[0..N-1] - Y - function values, array[0..N-1] - N - points count (optional): - * N>=2 - * if given, only first N points are used to build spline - * if not given, automatically detected from X/Y sizes - (len(X) must be equal to len(Y)) +This subroutine unpacks two-dimensional spline into the coefficients table -OUTPUT PARAMETERS: - C - spline interpolant +Input parameters: + C - spline interpolant. +Result: + M, N- grid size (x-axis and y-axis) + D - number of components + Tbl - coefficients table, unpacked format, + D - components: [0..(N-1)*(M-1)*D-1, 0..19]. + For T=0..D-1 (component index), I = 0...N-2 (x index), + J=0..M-2 (y index): + K := T + I*D + J*D*(N-1) -ORDER OF POINTS + K-th row stores decomposition for T-th component of the + vector-valued function -Subroutine automatically sorts points, so caller may pass unsorted array. + Tbl[K,0] = X[i] + Tbl[K,1] = X[i+1] + Tbl[K,2] = Y[j] + Tbl[K,3] = Y[j+1] + Tbl[K,4] = C00 + Tbl[K,5] = C01 + Tbl[K,6] = C02 + Tbl[K,7] = C03 + Tbl[K,8] = C10 + Tbl[K,9] = C11 + ... + Tbl[K,19] = C33 + On each grid square spline is equals to: + S(x) = SUM(c[i,j]*(t^i)*(u^j), i=0..3, j=0..3) + t = x-x[j] + u = y-y[i] -- ALGLIB PROJECT -- - Copyright 24.06.2007 by Bochkanov Sergey + Copyright 16.04.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dbuildlinear( - real_1d_array x, - real_1d_array y, - spline1dinterpolant& c); -void alglib::spline1dbuildlinear( - real_1d_array x, - real_1d_array y, - ae_int_t n, - spline1dinterpolant& c); +
    void alglib::spline2dunpackv( + spline2dinterpolant c, + ae_int_t& m, + ae_int_t& n, + ae_int_t& d, + real_2d_array& tbl, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function builds monotone cubic Hermite interpolant. This interpolant -is monotonic in [x(0),x(n-1)] and is constant outside of this interval. - -In case y[] form non-monotonic sequence, interpolant is piecewise -monotonic. Say, for x=(0,1,2,3,4) and y=(0,1,2,1,0) interpolant will -monotonically grow at [0..2] and monotonically decrease at [2..4]. +This function unserializes data structure from string. +*************************************************************************/ +
    void spline2dunserialize(const std::string &s_in, spline2dinterpolant &obj); +void spline2dunserialize(const std::istream &s_in, spline2dinterpolant &obj); +
    + +
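An illustrative round-trip sketch (not a shipped example) for the std::string overloads of spline2dserialize()/spline2dunserialize(): a bilinear spline is serialized, restored into a second interpolant, and both interpolants return the same value.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Illustrative sketch: serialize a bilinear spline for f(x,y)=x+2*y
    // to a string and restore it into a second interpolant.
    //
    real_1d_array x = "[0.0, 1.0]";
    real_1d_array y = "[0.0, 1.0]";
    real_1d_array f = "[0.00,1.00,2.00,3.00]";
    spline2dinterpolant s;
    spline2dinterpolant s2;
    std::string buf;
    spline2dbuildbilinearv(x, 2, y, 2, f, 1, s);

    // serialize and restore
    spline2dserialize(s, buf);
    spline2dunserialize(buf, s2);

    // both interpolants give the same value: S(0.25,0.5) = 0.25 + 2*0.5 = 1.25
    printf("%.4f\n", double(spline2dcalc(s, 0.25, 0.5)));  // EXPECTED: 1.2500
    printf("%.4f\n", double(spline2dcalc(s2, 0.25, 0.5))); // EXPECTED: 1.2500
    return 0;
}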
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "interpolation.h"
     
    -INPUT PARAMETERS:
    -    X           -   spline nodes, array[0..N-1]. Subroutine automatically
    -                    sorts points, so caller may pass unsorted array.
    -    Y           -   function values, array[0..N-1]
    -    N           -   the number of points(N>=2).
    +using namespace alglib;
     
    -OUTPUT PARAMETERS:
    -    C           -   spline interpolant.
     
    - -- ALGLIB PROJECT --
    -     Copyright 21.06.2012 by Bochkanov Sergey
    -*************************************************************************/
    -
    void alglib::spline1dbuildmonotone( - real_1d_array x, - real_1d_array y, - spline1dinterpolant& c); -void alglib::spline1dbuildmonotone( - real_1d_array x, - real_1d_array y, - ae_int_t n, - spline1dinterpolant& c); +int main(int argc, char **argv) +{ + // + // We use bilinear spline to interpolate f(x,y)=x^2+2*y^2 sampled + // at (x,y) from [0.0, 0.5, 1.0] X [0.0, 1.0]. + // + real_1d_array x = "[0.0, 0.5, 1.0]"; + real_1d_array y = "[0.0, 1.0]"; + real_1d_array f = "[0.00,0.25,1.00,2.00,2.25,3.00]"; + double vx = 0.25; + double vy = 0.50; + double v; + double dx; + double dy; + double dxy; + spline2dinterpolant s; -
    -

    Examples:   [1]  

    - -
    -
    /************************************************************************* -This subroutine calculates the value of the spline at the given point X. + // build spline + spline2dbuildbicubicv(x, 3, y, 2, f, 1, s); -INPUT PARAMETERS: - C - spline interpolant - X - point + // calculate S(0.25,0.50) + v = spline2dcalc(s, vx, vy); + printf("%.4f\n", double(v)); // EXPECTED: 1.0625 -Result: - S(x) + // calculate derivatives + spline2ddiff(s, vx, vy, v, dx, dy, dxy); + printf("%.4f\n", double(v)); // EXPECTED: 1.0625 + printf("%.4f\n", double(dx)); // EXPECTED: 0.5000 + printf("%.4f\n", double(dy)); // EXPECTED: 2.0000 + return 0; +} - -- ALGLIB PROJECT -- - Copyright 23.06.2007 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::spline1dcalc(spline1dinterpolant c, double x); -
    -

    Examples:   [1]  [2]  [3]  

    - +
    -
    /************************************************************************* -This function solves following problem: given table y[] of function values -at old nodes x[] and new nodes x2[], it calculates and returns table of -function values y2[] (calculated at x2[]). +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -This function yields same result as Spline1DBuildCubic() call followed by -sequence of Spline1DDiff() calls, but it can be several times faster when -called for ordered X[] and X2[]. +using namespace alglib; -INPUT PARAMETERS: - X - old spline nodes - Y - function values - X2 - new spline nodes -OPTIONAL PARAMETERS: - N - points count: - * N>=2 - * if given, only first N points from X/Y are used - * if not given, automatically detected from X/Y sizes - (len(X) must be equal to len(Y)) - BoundLType - boundary condition type for the left boundary - BoundL - left boundary condition (first or second derivative, - depending on the BoundLType) - BoundRType - boundary condition type for the right boundary - BoundR - right boundary condition (first or second derivative, - depending on the BoundRType) - N2 - new points count: - * N2>=2 - * if given, only first N2 points from X2 are used - * if not given, automatically detected from X2 size +int main(int argc, char **argv) +{ + // + // We use bilinear spline to interpolate f(x,y)=x^2+2*y^2 sampled + // at (x,y) from [0.0, 0.5, 1.0] X [0.0, 1.0]. + // + real_1d_array x = "[0.0, 0.5, 1.0]"; + real_1d_array y = "[0.0, 1.0]"; + real_1d_array f = "[0.00,0.25,1.00,2.00,2.25,3.00]"; + double vx = 0.25; + double vy = 0.50; + double v; + spline2dinterpolant s; -OUTPUT PARAMETERS: - F2 - function values at X2[] + // build spline + spline2dbuildbilinearv(x, 3, y, 2, f, 1, s); -ORDER OF POINTS + // calculate S(0.25,0.50) + v = spline2dcalc(s, vx, vy); + printf("%.4f\n", double(v)); // EXPECTED: 1.1250 + return 0; +} -Subroutine automatically sorts points, so caller may pass unsorted array. -Function values are correctly reordered on return, so F2[I] is always -equal to S(X2[I]) independently of points order. -SETTING BOUNDARY VALUES: +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "interpolation.h"
     
    -The BoundLType/BoundRType parameters can have the following values:
    -    * -1, which corresonds to the periodic (cyclic) boundary conditions.
    -          In this case:
    -          * both BoundLType and BoundRType must be equal to -1.
    -          * BoundL/BoundR are ignored
    -          * Y[last] is ignored (it is assumed to be equal to Y[first]).
    -    *  0, which  corresponds  to  the  parabolically   terminated  spline
    -          (BoundL and/or BoundR are ignored).
    -    *  1, which corresponds to the first derivative boundary condition
    -    *  2, which corresponds to the second derivative boundary condition
    -    *  by default, BoundType=0 is used
    +using namespace alglib;
     
    -PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS:
     
    -Problems with periodic boundary conditions have Y[first_point]=Y[last_point].
    -However, this subroutine doesn't require you to specify equal  values  for
    -the first and last points - it automatically forces them  to  be  equal by
    -copying  Y[first_point]  (corresponds  to the leftmost,  minimal  X[])  to
    -Y[last_point]. However it is recommended to pass consistent values of Y[],
    -i.e. to make Y[first_point]=Y[last_point].
    +int main(int argc, char **argv)
    +{
    +    //
    +    // We build bilinear spline for f(x,y)=x+2*y for (x,y) in [0,1].
    +    // Then we apply several transformations to this spline.
    +    //
    +    real_1d_array x = "[0.0, 1.0]";
    +    real_1d_array y = "[0.0, 1.0]";
    +    real_1d_array f = "[0.00,1.00,2.00,3.00]";
    +    spline2dinterpolant s;
    +    spline2dinterpolant snew;
    +    double v;
    +    spline2dbuildbilinearv(x, 2, y, 2, f, 1, s);
     
    -  -- ALGLIB PROJECT --
    -     Copyright 03.09.2010 by Bochkanov Sergey
    -*************************************************************************/
    -
    void alglib::spline1dconvcubic( - real_1d_array x, - real_1d_array y, - real_1d_array x2, - real_1d_array& y2); -void alglib::spline1dconvcubic( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t boundltype, - double boundl, - ae_int_t boundrtype, - double boundr, - real_1d_array x2, - ae_int_t n2, - real_1d_array& y2); + // copy spline, apply transformation x:=2*xnew, y:=4*ynew + // evaluate at (xnew,ynew) = (0.25,0.25) - should be same as (x,y)=(0.5,1.0) + spline2dcopy(s, snew); + spline2dlintransxy(snew, 2.0, 0.0, 4.0, 0.0); + v = spline2dcalc(snew, 0.25, 0.25); + printf("%.4f\n", double(v)); // EXPECTED: 2.500 + + // copy spline, apply transformation SNew:=2*S+3 + spline2dcopy(s, snew); + spline2dlintransf(snew, 2.0, 3.0); + v = spline2dcalc(snew, 0.5, 1.0); + printf("%.4f\n", double(v)); // EXPECTED: 8.000 -
    -

    Examples:   [1]  

    - -
    -
    /************************************************************************* -This function solves following problem: given table y[] of function values -at old nodes x[] and new nodes x2[], it calculates and returns table of -function values y2[], first and second derivatives d2[] and dd2[] -(calculated at x2[]). + // + // Same example, but for vector spline (f0,f1) = {x+2*y, 2*x+y} + // + real_1d_array f2 = "[0.00,0.00, 1.00,2.00, 2.00,1.00, 3.00,3.00]"; + real_1d_array vr; + spline2dbuildbilinearv(x, 2, y, 2, f2, 2, s); -This function yields same result as Spline1DBuildCubic() call followed by -sequence of Spline1DDiff() calls, but it can be several times faster when -called for ordered X[] and X2[]. + // copy spline, apply transformation x:=2*xnew, y:=4*ynew + spline2dcopy(s, snew); + spline2dlintransxy(snew, 2.0, 0.0, 4.0, 0.0); + spline2dcalcv(snew, 0.25, 0.25, vr); + printf("%s\n", vr.tostring(4).c_str()); // EXPECTED: [2.500,2.000] -INPUT PARAMETERS: - X - old spline nodes - Y - function values - X2 - new spline nodes + // copy spline, apply transformation SNew:=2*S+3 + spline2dcopy(s, snew); + spline2dlintransf(snew, 2.0, 3.0); + spline2dcalcv(snew, 0.5, 1.0, vr); + printf("%s\n", vr.tostring(4).c_str()); // EXPECTED: [8.000,7.000] + return 0; +} -OPTIONAL PARAMETERS: - N - points count: - * N>=2 - * if given, only first N points from X/Y are used - * if not given, automatically detected from X/Y sizes - (len(X) must be equal to len(Y)) - BoundLType - boundary condition type for the left boundary - BoundL - left boundary condition (first or second derivative, - depending on the BoundLType) - BoundRType - boundary condition type for the right boundary - BoundR - right boundary condition (first or second derivative, - depending on the BoundRType) - N2 - new points count: - * N2>=2 - * if given, only first N2 points from X2 are used - * if not given, automatically detected from X2 size -OUTPUT PARAMETERS: - F2 - function values at X2[] - D2 - first derivatives at X2[] - DD2 - second derivatives at X2[] +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "interpolation.h"
     
    -ORDER OF POINTS
    +using namespace alglib;
     
    -Subroutine automatically sorts points, so caller  may pass unsorted array.
    -Function  values  are correctly reordered on  return, so F2[I]  is  always
    -equal to S(X2[I]) independently of points order.
     
    -SETTING BOUNDARY VALUES:
    +int main(int argc, char **argv)
    +{
    +    //
    +    // We use bicubic spline to reproduce f(x,y)=1/(1+x^2+2*y^2) sampled
    +    // at irregular points (x,y) from [-1,+1]*[-1,+1]
    +    //
    +    // We have 5 such points, located approximately at corners of the area
    +    // and its center -  but not exactly at the grid. Thus, we have to FIT
    +    // the spline, i.e. to solve least squares problem
    +    //
    +    real_2d_array xy = "[[-0.987,-0.902,0.359],[0.948,-0.992,0.347],[-1.000,1.000,0.333],[1.000,0.973,0.339],[0.017,0.180,0.968]]";
    +
    +    //
+    // The first step is to create a spline2dbuilder object and set its properties:
    +    // * d=1 means that we create vector-valued spline with 1 component
    +    // * we specify dataset xy
    +    // * we rely on automatic selection of interpolation area
+    // * we tell the builder that we want to use a 5x5 grid for the underlying spline
+    // * we choose the least squares solver named BlockLLS and configure it
+    //   by telling it that we want to apply zero nonlinearity penalty.
    +    //
    +    // NOTE: you can specify non-zero lambdav if you want to make your spline
    +    //       more "rigid", i.e. to penalize nonlinearity.
    +    //
    +    // NOTE: ALGLIB has two solvers which fit bicubic splines to irregular data,
+    //       one of them is BlockLLS and another one is FastDDM. The former is
+    //       intended for moderately sized grids (up to 512x512 nodes, although
+    //       it may take up to a few minutes); it is the easiest spline fitting
+    //       function in the library to use and control. The latter, FastDDM,
    +    //       is intended for efficient solution of large-scale problems
    +    //       (up to 100.000.000 nodes). Both solvers can be parallelized, but
    +    //       FastDDM is much more efficient. See comments for more information.
    +    //
    +    spline2dbuilder builder;
    +    ae_int_t d = 1;
    +    double lambdav = 0.000;
    +    spline2dbuildercreate(d, builder);
    +    spline2dbuildersetpoints(builder, xy, 5);
    +    spline2dbuildersetgrid(builder, 5, 5);
    +    spline2dbuildersetalgoblocklls(builder, lambdav);
     
    -The BoundLType/BoundRType parameters can have the following values:
    -    * -1, which corresonds to the periodic (cyclic) boundary conditions.
    -          In this case:
    -          * both BoundLType and BoundRType must be equal to -1.
    -          * BoundL/BoundR are ignored
    -          * Y[last] is ignored (it is assumed to be equal to Y[first]).
    -    *  0, which  corresponds  to  the  parabolically   terminated  spline
    -          (BoundL and/or BoundR are ignored).
    -    *  1, which corresponds to the first derivative boundary condition
    -    *  2, which corresponds to the second derivative boundary condition
    -    *  by default, BoundType=0 is used
    +    //
    +    // Now we are ready to fit and evaluate our results
    +    //
    +    spline2dinterpolant s;
    +    spline2dfitreport rep;
    +    spline2dfit(builder, s, rep);
     
    -PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS:
    +    // evaluate results - function value at the grid is reproduced exactly
    +    double v;
    +    v = spline2dcalc(s, -1, 1);
    +    printf("%.2f\n", double(v)); // EXPECTED: 0.333000
     
    -Problems with periodic boundary conditions have Y[first_point]=Y[last_point].
    -However, this subroutine doesn't require you to specify equal  values  for
    -the first and last points - it automatically forces them  to  be  equal by
    -copying  Y[first_point]  (corresponds  to the leftmost,  minimal  X[])  to
    -Y[last_point]. However it is recommended to pass consistent values of Y[],
    -i.e. to make Y[first_point]=Y[last_point].
    +    // check maximum error - it must be nearly zero
    +    printf("%.2f\n", double(rep.maxerror)); // EXPECTED: 0.000
    +    return 0;
    +}
     
    -  -- ALGLIB PROJECT --
    -     Copyright 03.09.2010 by Bochkanov Sergey
    -*************************************************************************/
    -
    void alglib::spline1dconvdiff2cubic( - real_1d_array x, - real_1d_array y, - real_1d_array x2, - real_1d_array& y2, - real_1d_array& d2, - real_1d_array& dd2); -void alglib::spline1dconvdiff2cubic( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t boundltype, - double boundl, - ae_int_t boundrtype, - double boundr, - real_1d_array x2, - ae_int_t n2, - real_1d_array& y2, - real_1d_array& d2, - real_1d_array& dd2); -
    -

    Examples:   [1]  

    - +
    -
    /************************************************************************* -This function solves following problem: given table y[] of function values -at old nodes x[] and new nodes x2[], it calculates and returns table of -function values y2[] and derivatives d2[] (calculated at x2[]). +#include "stdafx.h" +#include <stdlib.h> +#include <stdio.h> +#include <math.h> +#include "interpolation.h" -This function yields same result as Spline1DBuildCubic() call followed by -sequence of Spline1DDiff() calls, but it can be several times faster when -called for ordered X[] and X2[]. +using namespace alglib; -INPUT PARAMETERS: - X - old spline nodes - Y - function values - X2 - new spline nodes -OPTIONAL PARAMETERS: - N - points count: - * N>=2 - * if given, only first N points from X/Y are used - * if not given, automatically detected from X/Y sizes - (len(X) must be equal to len(Y)) - BoundLType - boundary condition type for the left boundary - BoundL - left boundary condition (first or second derivative, - depending on the BoundLType) - BoundRType - boundary condition type for the right boundary - BoundR - right boundary condition (first or second derivative, - depending on the BoundRType) - N2 - new points count: - * N2>=2 - * if given, only first N2 points from X2 are used - * if not given, automatically detected from X2 size +int main(int argc, char **argv) +{ + // + // We build bilinear spline for f(x,y)=x+2*y+3*xy for (x,y) in [0,1]. + // Then we demonstrate how to unpack it. + // + real_1d_array x = "[0.0, 1.0]"; + real_1d_array y = "[0.0, 1.0]"; + real_1d_array f = "[0.00,1.00,2.00,6.00]"; + real_2d_array c; + ae_int_t m; + ae_int_t n; + ae_int_t d; + spline2dinterpolant s; -OUTPUT PARAMETERS: - F2 - function values at X2[] - D2 - first derivatives at X2[] + // build spline + spline2dbuildbilinearv(x, 2, y, 2, f, 1, s); -ORDER OF POINTS + // unpack and test + spline2dunpackv(s, m, n, d, c); + printf("%s\n", c.tostring(4).c_str()); // EXPECTED: [[0, 1, 0, 1, 0,2,0,0, 1,3,0,0, 0,0,0,0, 0,0,0,0 ]] + return 0; +} -Subroutine automatically sorts points, so caller may pass unsorted array. -Function values are correctly reordered on return, so F2[I] is always -equal to S(X2[I]) independently of points order. -SETTING BOUNDARY VALUES: +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "interpolation.h"
     
    -The BoundLType/BoundRType parameters can have the following values:
    -    * -1, which corresonds to the periodic (cyclic) boundary conditions.
    -          In this case:
    -          * both BoundLType and BoundRType must be equal to -1.
    -          * BoundL/BoundR are ignored
    -          * Y[last] is ignored (it is assumed to be equal to Y[first]).
    -    *  0, which  corresponds  to  the  parabolically   terminated  spline
    -          (BoundL and/or BoundR are ignored).
    -    *  1, which corresponds to the first derivative boundary condition
    -    *  2, which corresponds to the second derivative boundary condition
    -    *  by default, BoundType=0 is used
    +using namespace alglib;
     
    -PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS:
     
    -Problems with periodic boundary conditions have Y[first_point]=Y[last_point].
    -However, this subroutine doesn't require you to specify equal  values  for
    -the first and last points - it automatically forces them  to  be  equal by
    -copying  Y[first_point]  (corresponds  to the leftmost,  minimal  X[])  to
    -Y[last_point]. However it is recommended to pass consistent values of Y[],
    -i.e. to make Y[first_point]=Y[last_point].
    +int main(int argc, char **argv)
    +{
    +    //
    +    // We build bilinear vector-valued spline (f0,f1) = {x+2*y, 2*x+y}
    +    // Spline is built using function values at 2x2 grid: (x,y)=[0,1]*[0,1]
    +    // Then we perform evaluation at (x,y)=(0.1,0.3)
    +    //
    +    real_1d_array x = "[0.0, 1.0]";
    +    real_1d_array y = "[0.0, 1.0]";
    +    real_1d_array f = "[0.00,0.00, 1.00,2.00, 2.00,1.00, 3.00,3.00]";
    +    spline2dinterpolant s;
    +    real_1d_array vr;
    +    spline2dbuildbilinearv(x, 2, y, 2, f, 2, s);
    +    spline2dcalcv(s, 0.1, 0.3, vr);
    +    printf("%s\n", vr.tostring(4).c_str()); // EXPECTED: [0.700,0.500]
    +    return 0;
    +}
     
    -  -- ALGLIB PROJECT --
    -     Copyright 03.09.2010 by Bochkanov Sergey
    +
    +
    +
    + +spline3dinterpolant
    + +spline3dbuildtrilinearv
    +spline3dcalc
    +spline3dcalcv
    +spline3dcalcvbuf
    +spline3dlintransf
    +spline3dlintransxyz
    +spline3dresampletrilinear
    +spline3dunpackv
    + + + + +
    spline3d_trilinear Trilinear spline interpolation
    spline3d_vector Vector-valued trilinear spline interpolation
    + +
    +
/************************************************************************* +3-dimensional spline interpolant *************************************************************************/ -
    void alglib::spline1dconvdiffcubic( - real_1d_array x, - real_1d_array y, - real_1d_array x2, - real_1d_array& y2, - real_1d_array& d2); -void alglib::spline1dconvdiffcubic( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t boundltype, - double boundl, - ae_int_t boundrtype, - double boundr, - real_1d_array x2, - ae_int_t n2, - real_1d_array& y2, - real_1d_array& d2); +
    class spline3dinterpolant +{ +};
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This subroutine differentiates the spline. +This subroutine builds trilinear vector-valued spline. INPUT PARAMETERS: - C - spline interpolant. - X - point + X - spline abscissas, array[0..N-1] + Y - spline ordinates, array[0..M-1] + Z - spline applicates, array[0..L-1] + F - function values, array[0..M*N*L*D-1]: + * first D elements store D values at (X[0],Y[0],Z[0]) + * next D elements store D values at (X[1],Y[0],Z[0]) + * next D elements store D values at (X[2],Y[0],Z[0]) + * ... + * next D elements store D values at (X[0],Y[1],Z[0]) + * next D elements store D values at (X[1],Y[1],Z[0]) + * next D elements store D values at (X[2],Y[1],Z[0]) + * ... + * next D elements store D values at (X[0],Y[0],Z[1]) + * next D elements store D values at (X[1],Y[0],Z[1]) + * next D elements store D values at (X[2],Y[0],Z[1]) + * ... + * general form - D function values at (X[i],Y[j]) are stored + at F[D*(N*(M*K+J)+I)...D*(N*(M*K+J)+I)+D-1]. + M,N, + L - grid size, M>=2, N>=2, L>=2 + D - vector dimension, D>=1 -Result: - S - S(x) - DS - S'(x) - D2S - S''(x) +OUTPUT PARAMETERS: + C - spline interpolant -- ALGLIB PROJECT -- - Copyright 24.06.2007 by Bochkanov Sergey + Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1ddiff( - spline1dinterpolant c, - double x, - double& s, - double& ds, - double& d2s); +
    void alglib::spline3dbuildtrilinearv( + real_1d_array x, + ae_int_t n, + real_1d_array y, + ae_int_t m, + real_1d_array z, + ae_int_t l, + real_1d_array f, + ae_int_t d, + spline3dinterpolant& c, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -This function solves following problem: given table y[] of function values -at nodes x[], it calculates and returns tables of first and second -function derivatives d1[] and d2[] (calculated at the same nodes x[]). - -This function yields same result as Spline1DBuildCubic() call followed by -sequence of Spline1DDiff() calls, but it can be several times faster when -called for ordered X[] and X2[]. +This subroutine calculates the value of the trilinear or tricubic spline at +the given point (X,Y,Z). INPUT PARAMETERS: - X - spline nodes - Y - function values - -OPTIONAL PARAMETERS: - N - points count: - * N>=2 - * if given, only first N points are used - * if not given, automatically detected from X/Y sizes - (len(X) must be equal to len(Y)) - BoundLType - boundary condition type for the left boundary - BoundL - left boundary condition (first or second derivative, - depending on the BoundLType) - BoundRType - boundary condition type for the right boundary - BoundR - right boundary condition (first or second derivative, - depending on the BoundRType) - -OUTPUT PARAMETERS: - D1 - S' values at X[] - D2 - S'' values at X[] - -ORDER OF POINTS - -Subroutine automatically sorts points, so caller may pass unsorted array. -Derivative values are correctly reordered on return, so D[I] is always -equal to S'(X[I]) independently of points order. - -SETTING BOUNDARY VALUES: - -The BoundLType/BoundRType parameters can have the following values: - * -1, which corresonds to the periodic (cyclic) boundary conditions. - In this case: - * both BoundLType and BoundRType must be equal to -1. - * BoundL/BoundR are ignored - * Y[last] is ignored (it is assumed to be equal to Y[first]). - * 0, which corresponds to the parabolically terminated spline - (BoundL and/or BoundR are ignored). - * 1, which corresponds to the first derivative boundary condition - * 2, which corresponds to the second derivative boundary condition - * by default, BoundType=0 is used - -PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: + C - coefficients table. + Built by BuildBilinearSpline or BuildBicubicSpline. + X, Y, + Z - point -Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. -However, this subroutine doesn't require you to specify equal values for -the first and last points - it automatically forces them to be equal by -copying Y[first_point] (corresponds to the leftmost, minimal X[]) to -Y[last_point]. However it is recommended to pass consistent values of Y[], -i.e. to make Y[first_point]=Y[last_point]. +Result: + S(x,y,z) -- ALGLIB PROJECT -- - Copyright 03.09.2010 by Bochkanov Sergey + Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dgriddiff2cubic( - real_1d_array x, - real_1d_array y, - real_1d_array& d1, - real_1d_array& d2); -void alglib::spline1dgriddiff2cubic( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t boundltype, - double boundl, - ae_int_t boundrtype, - double boundr, - real_1d_array& d1, - real_1d_array& d2); +
    double alglib::spline3dcalc( + spline3dinterpolant c, + double x, + double y, + double z, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  

    +
     
    /************************************************************************* -This function solves following problem: given table y[] of function values -at nodes x[], it calculates and returns table of function derivatives d[] -(calculated at the same nodes x[]). - -This function yields same result as Spline1DBuildCubic() call followed by -sequence of Spline1DDiff() calls, but it can be several times faster when -called for ordered X[] and X2[]. +This subroutine calculates trilinear or tricubic vector-valued spline at the +given point (X,Y,Z). INPUT PARAMETERS: - X - spline nodes - Y - function values - -OPTIONAL PARAMETERS: - N - points count: - * N>=2 - * if given, only first N points are used - * if not given, automatically detected from X/Y sizes - (len(X) must be equal to len(Y)) - BoundLType - boundary condition type for the left boundary - BoundL - left boundary condition (first or second derivative, - depending on the BoundLType) - BoundRType - boundary condition type for the right boundary - BoundR - right boundary condition (first or second derivative, - depending on the BoundRType) + C - spline interpolant. + X, Y, + Z - point OUTPUT PARAMETERS: - D - derivative values at X[] - -ORDER OF POINTS - -Subroutine automatically sorts points, so caller may pass unsorted array. -Derivative values are correctly reordered on return, so D[I] is always -equal to S'(X[I]) independently of points order. - -SETTING BOUNDARY VALUES: - -The BoundLType/BoundRType parameters can have the following values: - * -1, which corresonds to the periodic (cyclic) boundary conditions. - In this case: - * both BoundLType and BoundRType must be equal to -1. - * BoundL/BoundR are ignored - * Y[last] is ignored (it is assumed to be equal to Y[first]). - * 0, which corresponds to the parabolically terminated spline - (BoundL and/or BoundR are ignored). - * 1, which corresponds to the first derivative boundary condition - * 2, which corresponds to the second derivative boundary condition - * by default, BoundType=0 is used - -PROBLEMS WITH PERIODIC BOUNDARY CONDITIONS: - -Problems with periodic boundary conditions have Y[first_point]=Y[last_point]. -However, this subroutine doesn't require you to specify equal values for -the first and last points - it automatically forces them to be equal by -copying Y[first_point] (corresponds to the leftmost, minimal X[]) to -Y[last_point]. However it is recommended to pass consistent values of Y[], -i.e. to make Y[first_point]=Y[last_point]. + F - array[D] which stores function values. F is out-parameter and + it is reallocated after call to this function. In case you + want to reuse previously allocated F, you may use + Spline2DCalcVBuf(), which reallocates F only when it is too + small. -- ALGLIB PROJECT -- - Copyright 03.09.2010 by Bochkanov Sergey + Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dgriddiffcubic( - real_1d_array x, - real_1d_array y, - real_1d_array& d); -void alglib::spline1dgriddiffcubic( - real_1d_array x, - real_1d_array y, - ae_int_t n, - ae_int_t boundltype, - double boundl, - ae_int_t boundrtype, - double boundr, - real_1d_array& d); +
    void alglib::spline3dcalcv( + spline3dinterpolant c, + double x, + double y, + double z, + real_1d_array& f, + const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  

    +
     
/************************************************************************* -This subroutine integrates the spline. +This subroutine calculates trilinear or tricubic vector-valued spline at the +given point (X,Y,Z). INPUT PARAMETERS: C - spline interpolant. - X - right bound of the integration interval [a, x], - here 'a' denotes min(x[]) -Result: - integral(S(t)dt,a,x) + X, Y, + Z - point + F - output buffer, possibly preallocated array. In case array size + is large enough to store result, it is not reallocated. Array + which is too short will be reallocated + +OUTPUT PARAMETERS: + F - array[D] (or larger) which stores function values -- ALGLIB PROJECT -- - Copyright 23.06.2007 by Bochkanov Sergey + Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ -
    double alglib::spline1dintegrate(spline1dinterpolant c, double x); +
    void alglib::spline3dcalcvbuf( + spline3dinterpolant c, + double x, + double y, + double z, + real_1d_array& f, + const xparams _params = alglib::xdefault);
    - +
     
/************************************************************************* -This subroutine performs linear transformation of the spline argument. +This subroutine performs linear transformation of the spline. INPUT PARAMETERS: C - spline interpolant. - A, B- transformation coefficients: x = A*t + B -Result: + A, B- transformation coefficients: S2(x,y,z) = A*S(x,y,z) + B + +OUTPUT PARAMETERS: C - transformed spline -- ALGLIB PROJECT -- - Copyright 30.06.2007 by Bochkanov Sergey + Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dlintransx(spline1dinterpolant c, double a, double b); +
    void alglib::spline3dlintransf( + spline3dinterpolant c, + double a, + double b, + const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine performs linear transformation of the spline. +This subroutine performs linear transformation of the spline argument. INPUT PARAMETERS: - C - spline interpolant. - A, B- transformation coefficients: S2(x) = A*S(x) + B -Result: + C - spline interpolant + AX, BX - transformation coefficients: x = A*u + B + AY, BY - transformation coefficients: y = A*v + B + AZ, BZ - transformation coefficients: z = A*w + B + +OUTPUT PARAMETERS: C - transformed spline -- ALGLIB PROJECT -- - Copyright 30.06.2007 by Bochkanov Sergey + Copyright 26.04.2012 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dlintransy(spline1dinterpolant c, double a, double b); +
    void alglib::spline3dlintransxyz( + spline3dinterpolant c, + double ax, + double bx, + double ay, + double by, + double az, + double bz, + const xparams _params = alglib::xdefault);
    - +
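An illustrative sketch (not a shipped example) that mirrors the 2D linear-transformation example earlier in this section, but uses the 3D calls spline3dlintransxyz() and spline3dlintransf() on the trilinear test function f(x,y,z)=x+x*y+z from the trilinear example below.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Illustrative sketch: build a trilinear spline for f(x,y,z)=x+x*y+z
    // on [0,1]^3 and apply argument/value transformations.
    //
    real_1d_array x = "[0.0, 1.0]";
    real_1d_array y = "[0.0, 1.0]";
    real_1d_array z = "[0.0, 1.0]";
    real_1d_array f = "[0,1,0,2,1,2,1,3]";
    spline3dinterpolant s;
    double v;
    spline3dbuildtrilinearv(x, 2, y, 2, z, 2, f, 1, s);

    // argument transformation x:=2*u, y:=2*v, z:=2*w, so the value at
    // (0.25,0.25,0.25) equals the original S(0.5,0.5,0.5)=1.25
    spline3dlintransxyz(s, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0);
    v = spline3dcalc(s, 0.25, 0.25, 0.25);
    printf("%.4f\n", double(v)); // EXPECTED: 1.2500

    // value transformation S:=2*S+3
    spline3dlintransf(s, 2.0, 3.0);
    v = spline3dcalc(s, 0.25, 0.25, 0.25);
    printf("%.4f\n", double(v)); // EXPECTED: 5.5000
    return 0;
}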
     
    /************************************************************************* -This subroutine unpacks the spline into the coefficients table. +Trilinear spline resampling INPUT PARAMETERS: - C - spline interpolant. - X - point + A - array[0..OldXCount*OldYCount*OldZCount-1], function + values at the old grid, : + A[0] x=0,y=0,z=0 + A[1] x=1,y=0,z=0 + A[..] ... + A[..] x=oldxcount-1,y=0,z=0 + A[..] x=0,y=1,z=0 + A[..] ... + ... + OldZCount - old Z-count, OldZCount>1 + OldYCount - old Y-count, OldYCount>1 + OldXCount - old X-count, OldXCount>1 + NewZCount - new Z-count, NewZCount>1 + NewYCount - new Y-count, NewYCount>1 + NewXCount - new X-count, NewXCount>1 OUTPUT PARAMETERS: - Tbl - coefficients table, unpacked format, array[0..N-2, 0..5]. - For I = 0...N-2: - Tbl[I,0] = X[i] - Tbl[I,1] = X[i+1] - Tbl[I,2] = C0 - Tbl[I,3] = C1 - Tbl[I,4] = C2 - Tbl[I,5] = C3 - On [x[i], x[i+1]] spline is equals to: - S(x) = C0 + C1*t + C2*t^2 + C3*t^3 - t = x-x[i] - -NOTE: - You can rebuild spline with Spline1DBuildHermite() function, which - accepts as inputs function values and derivatives at nodes, which are - easy to calculate when you have coefficients. + B - array[0..NewXCount*NewYCount*NewZCount-1], function + values at the new grid: + B[0] x=0,y=0,z=0 + B[1] x=1,y=0,z=0 + B[..] ... + B[..] x=newxcount-1,y=0,z=0 + B[..] x=0,y=1,z=0 + B[..] ... + ... - -- ALGLIB PROJECT -- - Copyright 29.06.2007 by Bochkanov Sergey + -- ALGLIB routine -- + 26.04.2012 + Copyright by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline1dunpack( - spline1dinterpolant c, - ae_int_t& n, - real_2d_array& tbl); +
    void alglib::spline3dresampletrilinear( + real_1d_array a, + ae_int_t oldzcount, + ae_int_t oldycount, + ae_int_t oldxcount, + ae_int_t newzcount, + ae_int_t newycount, + ae_int_t newxcount, + real_1d_array& b, + const xparams _params = alglib::xdefault);
    - +
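An illustrative sketch (not a shipped example) for spline3dresampletrilinear(): a 2x2x2 grid (x index changing fastest, as described above) is resampled to 3x3x3 and only the centre value is checked. The expected value assumes that, as in the 2D case, both grids are equidistant over the same cube.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Illustrative sketch: trilinear resampling of a 2x2x2 grid (values of
    // f(x,y,z)=x+2*y+4*z, x changing fastest) onto a 3x3x3 grid.
    //
    real_1d_array a = "[0,1,2,3,4,5,6,7]";
    real_1d_array b;
    spline3dresampletrilinear(a, 2, 2, 2, 3, 3, 3, b);

    // centre of the new grid: index = 1 + 1*3 + 1*9 = 13, which corresponds
    // to (0.5,0.5,0.5) assuming both grids span the same cube
    printf("%.4f\n", double(b[13])); // EXPECTED: 3.5000
    return 0;
}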
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    -    //
    -    // We use cubic spline to do resampling, i.e. having
    -    // values of f(x)=x^2 sampled at 5 equidistant nodes on [-1,+1]
    -    // we calculate values/derivatives of cubic spline on 
    -    // another grid (equidistant with 9 nodes on [-1,+1])
    -    // WITHOUT CONSTRUCTION OF SPLINE OBJECT.
    -    //
    -    // There are efficient functions spline1dconvcubic(),
    -    // spline1dconvdiffcubic() and spline1dconvdiff2cubic() 
    -    // for such calculations.
    -    //
    -    // We use default boundary conditions ("parabolically terminated
    -    // spline") because cubic spline built with such boundary conditions 
    -    // will exactly reproduce any quadratic f(x).
    -    //
    -    // Actually, we could use natural conditions, but we feel that 
    -    // spline which exactly reproduces f() will show us more 
    -    // understandable results.
    -    //
    -    real_1d_array x_old = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    -    real_1d_array y_old = "[+1.0,0.25,0.0,0.25,+1.0]";
    -    real_1d_array x_new = "[-1.00,-0.75,-0.50,-0.25,0.00,+0.25,+0.50,+0.75,+1.00]";
    -    real_1d_array y_new;
    -    real_1d_array d1_new;
    -    real_1d_array d2_new;
    -
    -    //
    -    // First, conversion without differentiation.
    -    //
    -    //
    -    spline1dconvcubic(x_old, y_old, x_new, y_new);
    -    printf("%s\n", y_new.tostring(3).c_str()); // EXPECTED: [1.0000, 0.5625, 0.2500, 0.0625, 0.0000, 0.0625, 0.2500, 0.5625, 1.0000]
    -
    -    //
    -    // Then, conversion with differentiation (first derivatives only)
    -    //
    -    //
    -    spline1dconvdiffcubic(x_old, y_old, x_new, y_new, d1_new);
    -    printf("%s\n", y_new.tostring(3).c_str()); // EXPECTED: [1.0000, 0.5625, 0.2500, 0.0625, 0.0000, 0.0625, 0.2500, 0.5625, 1.0000]
    -    printf("%s\n", d1_new.tostring(3).c_str()); // EXPECTED: [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0]
    -
    -    //
    -    // Finally, conversion with first and second derivatives
    -    //
    -    //
    -    spline1dconvdiff2cubic(x_old, y_old, x_new, y_new, d1_new, d2_new);
    -    printf("%s\n", y_new.tostring(3).c_str()); // EXPECTED: [1.0000, 0.5625, 0.2500, 0.0625, 0.0000, 0.0625, 0.2500, 0.5625, 1.0000]
    -    printf("%s\n", d1_new.tostring(3).c_str()); // EXPECTED: [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0]
    -    printf("%s\n", d2_new.tostring(3).c_str()); // EXPECTED: [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0]
    -    return 0;
    -}
    +
    /************************************************************************* +This subroutine unpacks tri-dimensional spline into the coefficients table +INPUT PARAMETERS: + C - spline interpolant. -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +Result:
    +    N   -   grid size (X)
    +    M   -   grid size (Y)
    +    L   -   grid size (Z)
    +    D   -   number of components
    +    SType-  spline type. Currently, only one spline type is supported:
    +            trilinear spline, as indicated by SType=1.
    +    Tbl -   spline coefficients: [0..(N-1)*(M-1)*(L-1)*D-1, 0..13].
    +            For T=0..D-1 (component index), I = 0...N-2 (x index),
    +            J=0..M-2 (y index), K=0..L-2 (z index):
    +                Q := T + I*D + J*D*(N-1) + K*D*(N-1)*(M-1),
     
    -using namespace alglib;
    +                Q-th row stores decomposition for T-th component of the
    +                vector-valued function
     
    +                Tbl[Q,0] = X[i]
    +                Tbl[Q,1] = X[i+1]
    +                Tbl[Q,2] = Y[j]
    +                Tbl[Q,3] = Y[j+1]
    +                Tbl[Q,4] = Z[k]
    +                Tbl[Q,5] = Z[k+1]
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // We use cubic spline to interpolate f(x)=x^2 sampled 
    -    // at 5 equidistant nodes on [-1,+1].
    -    //
    -    // First, we use default boundary conditions ("parabolically terminated
    -    // spline") because cubic spline built with such boundary conditions 
    -    // will exactly reproduce any quadratic f(x).
    -    //
    -    // Then we try to use natural boundary conditions
    -    //     d2S(-1)/dx^2 = 0.0
    -    //     d2S(+1)/dx^2 = 0.0
    -    // and see that such spline interpolated f(x) with small error.
    -    //
    -    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    -    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    -    double t = 0.25;
    -    double v;
    -    spline1dinterpolant s;
    -    ae_int_t natural_bound_type = 2;
    -    //
    -    // Test exact boundary conditions: build S(x), calculare S(0.25)
    -    // (almost same as original function)
    -    //
    -    spline1dbuildcubic(x, y, s);
    -    v = spline1dcalc(s, t);
    -    printf("%.4f\n", double(v)); // EXPECTED: 0.0625
    +                Tbl[Q,6] = C000
    +                Tbl[Q,7] = C100
    +                Tbl[Q,8] = C010
    +                Tbl[Q,9] = C110
    +                Tbl[Q,10]= C001
    +                Tbl[Q,11]= C101
    +                Tbl[Q,12]= C011
    +                Tbl[Q,13]= C111
+            On each grid cell the spline is equal to:
+                S(x,y,z) = SUM(c[i,j,k]*(t^i)*(u^j)*(v^k), i=0..1, j=0..1, k=0..1)
+                t = x-x[i]
+                u = y-y[j]
    +                v = z-z[k]
     
    -    //
    -    // Test natural boundary conditions: build S(x), calculare S(0.25)
    -    // (small interpolation error)
    -    //
    -    spline1dbuildcubic(x, y, 5, natural_bound_type, 0.0, natural_bound_type, 0.0, s);
    -    v = spline1dcalc(s, t);
    -    printf("%.3f\n", double(v)); // EXPECTED: 0.0580
    -    return 0;
    -}
    +            NOTE: format of Tbl is given for SType=1. Future versions of
    +                  ALGLIB can use different formats for different values of
    +                  SType.
     
    +  -- ALGLIB PROJECT --
    +     Copyright 26.04.2012 by Bochkanov Sergey
    +*************************************************************************/
    +
    void alglib::spline3dunpackv( + spline3dinterpolant c, + ae_int_t& n, + ae_int_t& m, + ae_int_t& l, + ae_int_t& d, + ae_int_t& stype, + real_2d_array& tbl, + const xparams _params = alglib::xdefault); -
    + +
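An illustrative sketch (not a shipped example) for spline3dunpackv(), applied to the trilinear test function f(x,y,z)=x+x*y+z used in the example below; a single 2x2x2 cell with one component yields a single 14-column table row laid out as described above.

#include "stdafx.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "interpolation.h"

using namespace alglib;

int main(int argc, char **argv)
{
    //
    // Illustrative sketch: unpack a trilinear spline built for
    // f(x,y,z)=x+x*y+z on the 2x2x2 grid [0,1]^3.
    //
    real_1d_array x = "[0.0, 1.0]";
    real_1d_array y = "[0.0, 1.0]";
    real_1d_array z = "[0.0, 1.0]";
    real_1d_array f = "[0,1,0,2,1,2,1,3]";
    spline3dinterpolant s;
    spline3dbuildtrilinearv(x, 2, y, 2, z, 2, f, 1, s);

    // unpack: a single 2x2x2 cell with one component gives one table row
    ae_int_t n, m, l, d, stype;
    real_2d_array tbl;
    spline3dunpackv(s, n, m, l, d, stype, tbl);
    printf("%d %d %d %d %d\n", int(n), int(m), int(l), int(d), int(stype)); // EXPECTED: 2 2 2 1 1
    // row layout: [x0,x1, y0,y1, z0,z1, C000,C100,C010,C110,C001,C101,C011,C111]
    printf("%s\n", tbl.tostring(1).c_str());
    return 0;
}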
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -43941,78 +55832,41 @@
     int main(int argc, char **argv)
     {
         //
    -    // We use cubic spline to do grid differentiation, i.e. having
    -    // values of f(x)=x^2 sampled at 5 equidistant nodes on [-1,+1]
    -    // we calculate derivatives of cubic spline at nodes WITHOUT
    -    // CONSTRUCTION OF SPLINE OBJECT.
    -    //
    -    // There are efficient functions spline1dgriddiffcubic() and
    -    // spline1dgriddiff2cubic() for such calculations.
    -    //
    -    // We use default boundary conditions ("parabolically terminated
    -    // spline") because cubic spline built with such boundary conditions 
    -    // will exactly reproduce any quadratic f(x).
    -    //
    -    // Actually, we could use natural conditions, but we feel that 
    -    // spline which exactly reproduces f() will show us more 
    -    // understandable results.
    -    //
    -    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    -    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    -    real_1d_array d1;
    -    real_1d_array d2;
    -
    -    //
    -    // We calculate first derivatives: they must be equal to 2*x
    -    //
    -    spline1dgriddiffcubic(x, y, d1);
    -    printf("%s\n", d1.tostring(3).c_str()); // EXPECTED: [-2.0, -1.0, 0.0, +1.0, +2.0]
    -
    -    //
    -    // Now test griddiff2, which returns first AND second derivatives.
    -    // First derivative is 2*x, second is equal to 2.0
    -    //
    -    spline1dgriddiff2cubic(x, y, d1, d2);
    -    printf("%s\n", d1.tostring(3).c_str()); // EXPECTED: [-2.0, -1.0, 0.0, +1.0, +2.0]
    -    printf("%s\n", d2.tostring(3).c_str()); // EXPECTED: [ 2.0,  2.0, 2.0,  2.0,  2.0]
    -    return 0;
    -}
    -
    -
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    -
    -
    -int main(int argc, char **argv)
    -{
    +    // We use trilinear spline to interpolate f(x,y,z)=x+xy+z sampled 
    +    // at (x,y,z) from [0.0, 1.0] X [0.0, 1.0] X [0.0, 1.0].
         //
    -    // We use piecewise linear spline to interpolate f(x)=x^2 sampled 
    -    // at 5 equidistant nodes on [-1,+1].
    +    // We store x, y and z-values in local arrays with the same names.
    +    // Function values are stored in the array F as follows:
    +    //     f[0]     (x,y,z) = (0,0,0)
    +    //     f[1]     (x,y,z) = (1,0,0)
    +    //     f[2]     (x,y,z) = (0,1,0)
    +    //     f[3]     (x,y,z) = (1,1,0)
    +    //     f[4]     (x,y,z) = (0,0,1)
    +    //     f[5]     (x,y,z) = (1,0,1)
    +    //     f[6]     (x,y,z) = (0,1,1)
    +    //     f[7]     (x,y,z) = (1,1,1)
         //
    -    real_1d_array x = "[-1.0,-0.5,0.0,+0.5,+1.0]";
    -    real_1d_array y = "[+1.0,0.25,0.0,0.25,+1.0]";
    -    double t = 0.25;
    +    real_1d_array x = "[0.0, 1.0]";
    +    real_1d_array y = "[0.0, 1.0]";
    +    real_1d_array z = "[0.0, 1.0]";
    +    real_1d_array f = "[0,1,0,2,1,2,1,3]";
    +    double vx = 0.50;
    +    double vy = 0.50;
    +    double vz = 0.50;
         double v;
    -    spline1dinterpolant s;
    +    spline3dinterpolant s;
     
         // build spline
    -    spline1dbuildlinear(x, y, s);
    +    spline3dbuildtrilinearv(x, 2, y, 2, z, 2, f, 1, s);
     
    -    // calculate S(0.25) - it is quite different from 0.25^2=0.0625
    -    v = spline1dcalc(s, t);
    -    printf("%.4f\n", double(v)); // EXPECTED: 0.125
    +    // calculate S(0.5,0.5,0.5)
    +    v = spline3dcalc(s, vx, vy, vz);
    +    printf("%.4f\n", double(v)); // EXPECTED: 1.2500
         return 0;
     }
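At the centre of the cell every trilinear weight equals 1/8, so the expected value can be checked by hand:

    S(0.5,0.5,0.5) = (0+1+0+2+1+2+1+3)/8 = 10/8 = 1.2500

which is exactly the value printed above.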
     
     
    -
    +
     #include "stdafx.h"
     #include <stdlib.h>
    @@ -44026,1005 +55880,1366 @@
     int main(int argc, char **argv)
     {
         //
    -    // Spline built witn spline1dbuildcubic() can be non-monotone even when
    -    // Y-values form monotone sequence. Say, for x=[0,1,2] and y=[0,1,1]
    -    // cubic spline will monotonically grow until x=1.5 and then start
    -    // decreasing.
    -    //
    -    // That's why ALGLIB provides special spline construction function
    -    // which builds spline which preserves monotonicity of the original
    -    // dataset.
    +    // We use trilinear vector-valued spline to interpolate {f0,f1}={x+xy+z,x+xy+yz+z}
    +    // sampled at (x,y,z) from [0.0, 1.0] X [0.0, 1.0] X [0.0, 1.0].
         //
    -    // NOTE: in case original dataset is non-monotonic, ALGLIB splits it
    -    // into monotone subsequences and builds piecewise monotonic spline.
    +    // We store x, y and z-values in local arrays with the same names.
    +    // Function values are stored in the array F as follows:
    +    //     f[0]     f0, (x,y,z) = (0,0,0)
    +    //     f[1]     f1, (x,y,z) = (0,0,0)
    +    //     f[2]     f0, (x,y,z) = (1,0,0)
    +    //     f[3]     f1, (x,y,z) = (1,0,0)
    +    //     f[4]     f0, (x,y,z) = (0,1,0)
    +    //     f[5]     f1, (x,y,z) = (0,1,0)
    +    //     f[6]     f0, (x,y,z) = (1,1,0)
    +    //     f[7]     f1, (x,y,z) = (1,1,0)
    +    //     f[8]     f0, (x,y,z) = (0,0,1)
    +    //     f[9]     f1, (x,y,z) = (0,0,1)
    +    //     f[10]    f0, (x,y,z) = (1,0,1)
    +    //     f[11]    f1, (x,y,z) = (1,0,1)
    +    //     f[12]    f0, (x,y,z) = (0,1,1)
    +    //     f[13]    f1, (x,y,z) = (0,1,1)
    +    //     f[14]    f0, (x,y,z) = (1,1,1)
    +    //     f[15]    f1, (x,y,z) = (1,1,1)
         //
    -    real_1d_array x = "[0,1,2]";
    -    real_1d_array y = "[0,1,1]";
    -    spline1dinterpolant s;
    +    real_1d_array x = "[0.0, 1.0]";
    +    real_1d_array y = "[0.0, 1.0]";
    +    real_1d_array z = "[0.0, 1.0]";
    +    real_1d_array f = "[0,0, 1,1, 0,0, 2,2, 1,1, 2,2, 1,2, 3,4]";
    +    double vx = 0.50;
    +    double vy = 0.50;
    +    double vz = 0.50;
    +    spline3dinterpolant s;
     
         // build spline
    -    spline1dbuildmonotone(x, y, s);
    +    spline3dbuildtrilinearv(x, 2, y, 2, z, 2, f, 2, s);
     
    -    // calculate S at x = [-0.5, 0.0, 0.5, 1.0, 1.5, 2.0]
    -    // you may see that spline is really monotonic
    -    double v;
    -    v = spline1dcalc(s, -0.5);
    -    printf("%.4f\n", double(v)); // EXPECTED: 0.0000
    -    v = spline1dcalc(s, 0.0);
    -    printf("%.4f\n", double(v)); // EXPECTED: 0.0000
    -    v = spline1dcalc(s, +0.5);
    -    printf("%.4f\n", double(v)); // EXPECTED: 0.5000
    -    v = spline1dcalc(s, 1.0);
    -    printf("%.4f\n", double(v)); // EXPECTED: 1.0000
    -    v = spline1dcalc(s, 1.5);
    -    printf("%.4f\n", double(v)); // EXPECTED: 1.0000
    -    v = spline1dcalc(s, 2.0);
    -    printf("%.4f\n", double(v)); // EXPECTED: 1.0000
    +    // calculate S(0.5,0.5,0.5) - we have vector of values instead of single value
    +    real_1d_array v;
    +    spline3dcalcv(s, vx, vy, vz, v);
    +    printf("%s\n", v.tostring(4).c_str()); // EXPECTED: [1.2500,1.5000]
         return 0;
     }
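The interleaved layout spelled out in the comment above follows the general rule that the D values at node (X[i],Y[j],Z[k]) occupy F[D*(N*(M*k+j)+i)] .. F[D*(N*(M*k+j)+i)+D-1]. A minimal sketch of generating such an array for this 2x2x2, D=2 case (fill_vector_values is an illustrative helper, not an ALGLIB function):

    #include "interpolation.h"

    using namespace alglib;

    // fill the interleaved value array for a 2x2x2 grid with D=2 components,
    // f0 = x+x*y+z and f1 = x+x*y+y*z+z
    static void fill_vector_values(real_1d_array &f)
    {
        const ae_int_t n = 2, m = 2, l = 2, d = 2;
        f.setlength(n*m*l*d);
        for(ae_int_t k = 0; k < l; k++)
            for(ae_int_t j = 0; j < m; j++)
                for(ae_int_t i = 0; i < n; i++)
                {
                    double x = double(i), y = double(j), z = double(k);
                    ae_int_t base = d*(n*(m*k+j)+i);
                    f[base+0] = x + x*y + z;         // f0
                    f[base+1] = x + x*y + y*z + z;   // f1
                }
    }

Averaging the eight corners again explains the expected output: f0 gives 10/8 = 1.25 and f1 gives (0+1+0+2+1+2+2+4)/8 = 12/8 = 1.50.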
     
     
    -
    + - -
    -
    -/*************************************************************************
    -2-dimensional spline inteprolant
    -*************************************************************************/
    -class spline2dinterpolant
    -{
    -};
    -
    -
    - -
    -
    /************************************************************************* -This subroutine was deprecated in ALGLIB 3.6.0 - -We recommend you to switch to Spline2DBuildBicubicV(), which is more -flexible and accepts its arguments in more convenient order. - - -- ALGLIB PROJECT -- - Copyright 05.07.2007 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::spline2dbuildbicubic( - real_1d_array x, - real_1d_array y, - real_2d_array f, - ae_int_t m, - ae_int_t n, - spline2dinterpolant& c); - -
    - -
    -
    /************************************************************************* -This subroutine builds bicubic vector-valued spline. - -Input parameters: - X - spline abscissas, array[0..N-1] - Y - spline ordinates, array[0..M-1] - F - function values, array[0..M*N*D-1]: - * first D elements store D values at (X[0],Y[0]) - * next D elements store D values at (X[1],Y[0]) - * general form - D function values at (X[i],Y[j]) are stored - at F[D*(J*N+I)...D*(J*N+I)+D-1]. - M,N - grid size, M>=2, N>=2 - D - vector dimension, D>=1 - -Output parameters: - C - spline interpolant - - -- ALGLIB PROJECT -- - Copyright 16.04.2012 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::spline2dbuildbicubicv( - real_1d_array x, - ae_int_t n, - real_1d_array y, - ae_int_t m, - real_1d_array f, - ae_int_t d, - spline2dinterpolant& c); - -
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This subroutine was deprecated in ALGLIB 3.6.0 - -We recommend you to switch to Spline2DBuildBilinearV(), which is more -flexible and accepts its arguments in more convenient order. - - -- ALGLIB PROJECT -- - Copyright 05.07.2007 by Bochkanov Sergey -*************************************************************************/ -
    -void alglib::spline2dbuildbilinear(
    -    real_1d_array x,
    -    real_1d_array y,
    -    real_2d_array f,
    -    ae_int_t m,
    -    ae_int_t n,
    -    spline2dinterpolant& c);
    +This object stores state of the SSA model.
    +
    +You should use ALGLIB functions to work with this object.
    +*************************************************************************/
    +class ssamodel
    +{
    +};
    - +
     
    /************************************************************************* -This subroutine builds bilinear vector-valued spline. +This function adds data sequence to SSA model. Only single-dimensional +sequences are supported. -Input parameters: - X - spline abscissas, array[0..N-1] - Y - spline ordinates, array[0..M-1] - F - function values, array[0..M*N*D-1]: - * first D elements store D values at (X[0],Y[0]) - * next D elements store D values at (X[1],Y[0]) - * general form - D function values at (X[i],Y[j]) are stored - at F[D*(J*N+I)...D*(J*N+I)+D-1]. - M,N - grid size, M>=2, N>=2 - D - vector dimension, D>=1 +What is a sequences? Following definitions/requirements apply: +* a sequence is an array of values measured in subsequent, equally + separated time moments (ticks). +* you may have many sequences in your dataset; say, one sequence may + correspond to one trading session. +* sequence length should be larger than current window length (shorter + sequences will be ignored during analysis). +* analysis is performed within a sequence; different sequences are NOT + stacked together to produce one large contiguous stream of data. +* analysis is performed for all sequences at once, i.e. same set of basis + vectors is computed for all sequences -Output parameters: - C - spline interpolant +INCREMENTAL ANALYSIS - -- ALGLIB PROJECT -- - Copyright 16.04.2012 by Bochkanov Sergey +This function is non intended for incremental updates of previously found +SSA basis. Calling it invalidates all previous analysis results (basis is +reset and will be recalculated from zero during next analysis). + +If you want to perform incremental/real-time SSA, consider using +following functions: +* ssaappendpointandupdate() for appending one point +* ssaappendsequenceandupdate() for appending new sequence + +INPUT PARAMETERS: + S - SSA model created with ssacreate() + X - array[N], data, can be larger (additional values + are ignored) + N - data length, can be automatically determined from + the array length. N>=0. + +OUTPUT PARAMETERS: + S - SSA model, updated + +NOTE: you can clear dataset with ssacleardata() + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    -void alglib::spline2dbuildbilinearv(
    +void alglib::ssaaddsequence(
    +    ssamodel s,
    +    real_1d_array x,
    +    const xparams _params = alglib::xdefault);
    +void alglib::ssaaddsequence(
    +    ssamodel s,
         real_1d_array x,
         ae_int_t n,
    -    real_1d_array y,
    -    ae_int_t m,
    -    real_1d_array f,
    -    ae_int_t d,
    -    spline2dinterpolant& c);
    +    const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  [2]  

    - +

    Examples:   [1]  [2]  [3]  

    +
     
    /************************************************************************* -This subroutine calculates the value of the bilinear or bicubic spline at -the given point X. +This function: +* builds SSA basis using internally stored (entire) dataset +* returns reconstruction for the last NTicks of the last sequence -Input parameters: - C - coefficients table. - Built by BuildBilinearSpline or BuildBicubicSpline. - X, Y- point +If you want to analyze some other sequence, use ssaanalyzesequence(). -Result: - S(x,y) +Reconstruction phase involves generation of NTicks-WindowWidth sliding +windows, their decomposition using empirical orthogonal functions found by +SSA, followed by averaging of each data point across several overlapping +windows. Thus, every point in the output trend is reconstructed using up +to WindowWidth overlapping windows (WindowWidth windows exactly in the +inner points, just one window at the extremal points). - -- ALGLIB PROJECT -- - Copyright 05.07.2007 by Bochkanov Sergey -*************************************************************************/ -
    double alglib::spline2dcalc(spline2dinterpolant c, double x, double y); +IMPORTANT: due to averaging this function returns different results for + different values of NTicks. It is expected and not a bug. -
    -

    Examples:   [1]  [2]  

    - -
    -
    /************************************************************************* -This subroutine calculates bilinear or bicubic vector-valued spline at the -given point (X,Y). + For example: + * Trend[NTicks-1] is always same because it is not averaged in + any case (same applies to Trend[0]). + * Trend[NTicks-2] has different values for NTicks=WindowWidth + and NTicks=WindowWidth+1 because former case means that no + averaging is performed, and latter case means that averaging + using two sliding windows is performed. Larger values of + NTicks produce same results as NTicks=WindowWidth+1. + * ...and so on... + +PERFORMANCE: this function has O((NTicks-WindowWidth)*WindowWidth*NBasis) + running time. If you work in time-constrained setting and + have to analyze just a few last ticks, choosing NTicks equal + to WindowWidth+SmoothingLen, with SmoothingLen=1...WindowWidth + will result in good compromise between noise cancellation and + analysis speed. INPUT PARAMETERS: - C - spline interpolant. - X, Y- point + S - SSA model + NTicks - number of ticks to analyze, Nticks>=1. + * special case of NTicks<=WindowWidth is handled + by analyzing last window and returning NTicks + last ticks. + * special case NTicks>LastSequenceLen is handled + by prepending result with NTicks-LastSequenceLen + zeros. OUTPUT PARAMETERS: - F - array[D] which stores function values. F is out-parameter and - it is reallocated after call to this function. In case you - want to reuse previously allocated F, you may use - Spline2DCalcVBuf(), which reallocates F only when it is too - small. + Trend - array[NTicks], reconstructed trend line + Noise - array[NTicks], the rest of the signal; + it holds that ActualData = Trend+Noise. - -- ALGLIB PROJECT -- - Copyright 16.04.2012 by Bochkanov Sergey + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + +In any case, only basis is reused. Reconstruction is performed from +scratch every time you call this function. + + +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* last sequence is shorter than the window length (analysis can be done, + but we can not perform reconstruction on the last sequence) + +Calling this function in degenerate cases returns following result: +* in any case, NTicks ticks is returned +* trend is assumed to be zero +* noise is initialized by the last sequence; if last sequence is shorter + than the window size, it is moved to the end of the array, and the + beginning of the noise array is filled by zeros + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is constructed). + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    -void alglib::spline2dcalcv(
    -    spline2dinterpolant c,
    -    double x,
    -    double y,
    -    real_1d_array& f);
    +void alglib::ssaanalyzelast(
    +    ssamodel s,
    +    ae_int_t nticks,
    +    real_1d_array& trend,
    +    real_1d_array& noise,
    +    const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This subroutine calculates bilinear or bicubic vector-valued spline at the -given point (X,Y). +This function executes SSA on internally stored dataset and returns +analysis for the last window of the last sequence. Such analysis is +an lightweight alternative for full scale reconstruction (see below). + +Typical use case for this function is real-time setting, when you are +interested in quick-and-dirty (very quick and very dirty) processing of +just a few last ticks of the trend. + +IMPORTANT: full scale SSA involves analysis of the ENTIRE dataset, + with reconstruction being done for all positions of sliding + window with subsequent hankelization (diagonal averaging) of + the resulting matrix. + + Such analysis requires O((DataLen-Window)*Window*NBasis) FLOPs + and can be quite costly. However, it has nice noise-canceling + effects due to averaging. + + This function performs REDUCED analysis of the last window. It + is much faster - just O(Window*NBasis), but its results are + DIFFERENT from that of ssaanalyzelast(). In particular, first + few points of the trend are much more prone to noise. INPUT PARAMETERS: - C - spline interpolant. - X, Y- point - F - output buffer, possibly preallocated array. In case array size - is large enough to store result, it is not reallocated. Array - which is too short will be reallocated + S - SSA model OUTPUT PARAMETERS: - F - array[D] (or larger) which stores function values + Trend - array[WindowSize], reconstructed trend line + Noise - array[WindowSize], the rest of the signal; + it holds that ActualData = Trend+Noise. + NTicks - current WindowSize - -- ALGLIB PROJECT -- - Copyright 16.04.2012 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::spline2dcalcvbuf( - spline2dinterpolant c, - double x, - double y, - real_1d_array& f); -
    - -
    -
    /************************************************************************* -This subroutine makes the copy of the spline model. +CACHING/REUSE OF THE BASIS -Input parameters: - C - spline interpolant +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. -Output parameters: - CC - spline copy +In any case, only basis is reused. Reconstruction is performed from +scratch every time you call this function. - -- ALGLIB PROJECT -- - Copyright 29.06.2007 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::spline2dcopy(spline2dinterpolant c, spline2dinterpolant& cc); -
    -

    Examples:   [1]  

    - -
    -
    /************************************************************************* -This subroutine calculates the value of the bilinear or bicubic spline at -the given point X and its derivatives. +HANDLING OF DEGENERATE CASES -Input parameters: - C - spline interpolant. - X, Y- point +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* last sequence is shorter than the window length (analysis can be done, + but we can not perform reconstruction on the last sequence) -Output parameters: - F - S(x,y) - FX - dS(x,y)/dX - FY - dS(x,y)/dY - FXY - d2S(x,y)/dXdY +Calling this function in degenerate cases returns following result: +* in any case, WindowWidth ticks is returned +* trend is assumed to be zero +* noise is initialized by the last sequence; if last sequence is shorter + than the window size, it is moved to the end of the array, and the + beginning of the noise array is filled by zeros - -- ALGLIB PROJECT -- - Copyright 05.07.2007 by Bochkanov Sergey +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is constructed). + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    -void alglib::spline2ddiff(
    -    spline2dinterpolant c,
    -    double x,
    -    double y,
    -    double& f,
    -    double& fx,
    -    double& fy,
    -    double& fxy);
    +void alglib::ssaanalyzelastwindow(
    +    ssamodel s,
    +    real_1d_array& trend,
    +    real_1d_array& noise,
    +    ae_int_t& nticks,
    +    const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine performs linear transformation of the spline. +This function: +* builds SSA basis using internally stored (entire) dataset +* returns reconstruction for the sequence being passed to this function -Input parameters: - C - spline interpolant. - A, B- transformation coefficients: S2(x,y) = A*S(x,y) + B +If you want to analyze last sequence stored in the model, use +ssaanalyzelast(). -Output parameters: - C - transformed spline +Reconstruction phase involves generation of NTicks-WindowWidth sliding +windows, their decomposition using empirical orthogonal functions found by +SSA, followed by averaging of each data point across several overlapping +windows. Thus, every point in the output trend is reconstructed using up +to WindowWidth overlapping windows (WindowWidth windows exactly in the +inner points, just one window at the extremal points). - -- ALGLIB PROJECT -- - Copyright 30.06.2007 by Bochkanov Sergey -*************************************************************************/ -
    void alglib::spline2dlintransf(spline2dinterpolant c, double a, double b); +PERFORMANCE: this function has O((NTicks-WindowWidth)*WindowWidth*NBasis) + running time. If you work in time-constrained setting and + have to analyze just a few last ticks, choosing NTicks equal + to WindowWidth+SmoothingLen, with SmoothingLen=1...WindowWidth + will result in good compromise between noise cancellation and + analysis speed. -
    -

    Examples:   [1]  

    - -
    -
    /************************************************************************* -This subroutine performs linear transformation of the spline argument. +INPUT PARAMETERS: + S - SSA model + Data - array[NTicks], can be larger (only NTicks leading + elements will be used) + NTicks - number of ticks to analyze, Nticks>=1. + * special case of NTicks<WindowWidth is handled + by returning zeros as trend, and signal as noise -Input parameters: - C - spline interpolant - AX, BX - transformation coefficients: x = A*t + B - AY, BY - transformation coefficients: y = A*u + B -Result: - C - transformed spline +OUTPUT PARAMETERS: + Trend - array[NTicks], reconstructed trend line + Noise - array[NTicks], the rest of the signal; + it holds that ActualData = Trend+Noise. - -- ALGLIB PROJECT -- - Copyright 30.06.2007 by Bochkanov Sergey + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + +In any case, only basis is reused. Reconstruction is performed from +scratch every time you call this function. + + +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* sequence being passed is shorter than the window length + +Calling this function in degenerate cases returns following result: +* in any case, NTicks ticks is returned +* trend is assumed to be zero +* noise is initialized by the sequence. + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is constructed). + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    -void alglib::spline2dlintransxy(
    -    spline2dinterpolant c,
    -    double ax,
    -    double bx,
    -    double ay,
    -    double by);
    +void alglib::ssaanalyzesequence(
    +    ssamodel s,
    +    real_1d_array data,
    +    real_1d_array& trend,
    +    real_1d_array& noise,
    +    const xparams _params = alglib::xdefault);
    +void alglib::ssaanalyzesequence(
    +    ssamodel s,
    +    real_1d_array data,
    +    ae_int_t nticks,
    +    real_1d_array& trend,
    +    real_1d_array& noise,
    +    const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  [2]  [3]  

    +
     
    /************************************************************************* -Bicubic spline resampling +This function appends single point to last data sequence stored in the SSA +model and tries to update model in the incremental manner (if possible +with current algorithm). -Input parameters: - A - function values at the old grid, - array[0..OldHeight-1, 0..OldWidth-1] - OldHeight - old grid height, OldHeight>1 - OldWidth - old grid width, OldWidth>1 - NewHeight - new grid height, NewHeight>1 - NewWidth - new grid width, NewWidth>1 +If you want to add more than one point at once: +* if you want to add M points to the same sequence, perform M-1 calls with + UpdateIts parameter set to 0.0, and last call with non-zero UpdateIts. +* if you want to add new sequence, use ssaappendsequenceandupdate() -Output parameters: - B - function values at the new grid, - array[0..NewHeight-1, 0..NewWidth-1] +Running time of this function does NOT depend on dataset size, only on +window width and number of singular vectors. Depending on algorithm being +used, incremental update has complexity: +* for top-K real time - O(UpdateIts*K*Width^2), with fractional UpdateIts +* for top-K direct - O(Width^3) for any non-zero UpdateIts +* for precomputed basis - O(1), no update is performed - -- ALGLIB routine -- - 15 May, 2007 - Copyright by Bochkanov Sergey +INPUT PARAMETERS: + S - SSA model created with ssacreate() + X - new point + UpdateIts - >=0, floating point (!) value, desired update + frequency: + * zero value means that point is stored, but no + update is performed + * integer part of the value means that specified + number of iterations is always performed + * fractional part of the value means that one + iteration is performed with this probability. + + Recommended value: 0<UpdateIts<=1. Values larger + than 1 are VERY seldom needed. If your dataset + changes slowly, you can set it to 0.1 and skip + 90% of updates. + + In any case, no information is lost even with zero + value of UpdateIts! It will be incorporated into + model, sooner or later. + +OUTPUT PARAMETERS: + S - SSA model, updated + +NOTE: this function uses internal RNG to handle fractional values of + UpdateIts. By default it is initialized with fixed seed during + initial calculation of basis. Thus subsequent calls to this function + will result in the same sequence of pseudorandom decisions. + + However, if you have several SSA models which are calculated + simultaneously, and if you want to reduce computational bottlenecks + by performing random updates at random moments, then fixed seed is + not an option - all updates will fire at same moments. + + You may change it with ssasetseed() function. + +NOTE: this function throws an exception if called for empty dataset (there + is no "last" sequence to modify). + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    -void alglib::spline2dresamplebicubic(
    -    real_2d_array a,
    -    ae_int_t oldheight,
    -    ae_int_t oldwidth,
    -    real_2d_array& b,
    -    ae_int_t newheight,
    -    ae_int_t newwidth);
    +void alglib::ssaappendpointandupdate(
    +    ssamodel s,
    +    double x,
    +    double updateits,
    +    const xparams _params = alglib::xdefault);
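A short fragment showing the fractional-update pattern recommended above (UpdateIts=0.1 stores every tick but refreshes the basis for only about 10% of the appends); it assumes a model s that has already been created, filled and configured as in the workflow sketch given near ssacreate() below:

    // stand-in values for a live feed; every tick is stored, the basis is
    // refreshed with probability 0.1 per append
    double incoming[4] = { 12.1, 12.8, 14.2, 15.0 };
    for(int i = 0; i < 4; i++)
        ssaappendpointandupdate(s, incoming[i], 0.1);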
    - +
     
    /************************************************************************* -Bilinear spline resampling +This function appends new sequence to dataset stored in the SSA model and +tries to update model in the incremental manner (if possible with current +algorithm). -Input parameters: - A - function values at the old grid, - array[0..OldHeight-1, 0..OldWidth-1] - OldHeight - old grid height, OldHeight>1 - OldWidth - old grid width, OldWidth>1 - NewHeight - new grid height, NewHeight>1 - NewWidth - new grid width, NewWidth>1 +Notes: +* if you want to add M sequences at once, perform M-1 calls with UpdateIts + parameter set to 0.0, and last call with non-zero UpdateIts. +* if you want to add just one point, use ssaappendpointandupdate() -Output parameters: - B - function values at the new grid, - array[0..NewHeight-1, 0..NewWidth-1] +Running time of this function does NOT depend on dataset size, only on +sequence length, window width and number of singular vectors. Depending on +algorithm being used, incremental update has complexity: +* for top-K real time - O(UpdateIts*K*Width^2+(NTicks-Width)*Width^2) +* for top-K direct - O(Width^3+(NTicks-Width)*Width^2) +* for precomputed basis - O(1), no update is performed - -- ALGLIB routine -- - 09.07.2007 - Copyright by Bochkanov Sergey +INPUT PARAMETERS: + S - SSA model created with ssacreate() + X - new sequence, array[NTicks] or larget + NTicks - >=1, number of ticks in the sequence + UpdateIts - >=0, floating point (!) value, desired update + frequency: + * zero value means that point is stored, but no + update is performed + * integer part of the value means that specified + number of iterations is always performed + * fractional part of the value means that one + iteration is performed with this probability. + + Recommended value: 0<UpdateIts<=1. Values larger + than 1 are VERY seldom needed. If your dataset + changes slowly, you can set it to 0.1 and skip + 90% of updates. + + In any case, no information is lost even with zero + value of UpdateIts! It will be incorporated into + model, sooner or later. + +OUTPUT PARAMETERS: + S - SSA model, updated + +NOTE: this function uses internal RNG to handle fractional values of + UpdateIts. By default it is initialized with fixed seed during + initial calculation of basis. Thus subsequent calls to this function + will result in the same sequence of pseudorandom decisions. + + However, if you have several SSA models which are calculated + simultaneously, and if you want to reduce computational bottlenecks + by performing random updates at random moments, then fixed seed is + not an option - all updates will fire at same moments. + + You may change it with ssasetseed() function. + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    -void alglib::spline2dresamplebilinear(
    -    real_2d_array a,
    -    ae_int_t oldheight,
    -    ae_int_t oldwidth,
    -    real_2d_array& b,
    -    ae_int_t newheight,
    -    ae_int_t newwidth);
    +void alglib::ssaappendsequenceandupdate(
    +    ssamodel s,
    +    real_1d_array x,
    +    double updateits,
    +    const xparams _params = alglib::xdefault);
    +void alglib::ssaappendsequenceandupdate(
    +    ssamodel s,
    +    real_1d_array x,
    +    ae_int_t nticks,
    +    double updateits,
    +    const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine was deprecated in ALGLIB 3.6.0 +This function clears all data stored in the model and invalidates all +basis components found so far. -We recommend you to switch to Spline2DUnpackV(), which is more flexible -and accepts its arguments in more convenient order. +INPUT PARAMETERS: + S - SSA model created with ssacreate() - -- ALGLIB PROJECT -- - Copyright 29.06.2007 by Bochkanov Sergey +OUTPUT PARAMETERS: + S - SSA model, updated + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    -void alglib::spline2dunpack(
    -    spline2dinterpolant c,
    -    ae_int_t& m,
    -    ae_int_t& n,
    -    real_2d_array& tbl);
    +void alglib::ssacleardata(
    +    ssamodel s,
    +    const xparams _params = alglib::xdefault);
    - +
     
    /************************************************************************* -This subroutine unpacks two-dimensional spline into the coefficients table +This function creates SSA model object. Right after creation model is in +"dummy" mode - you can add data, but analyzing/prediction will return +just zeros (it assumes that basis is empty). -Input parameters: - C - spline interpolant. +HOW TO USE SSA MODEL: -Result: - M, N- grid size (x-axis and y-axis) - D - number of components - Tbl - coefficients table, unpacked format, - D - components: [0..(N-1)*(M-1)*D-1, 0..19]. - For T=0..D-1 (component index), I = 0...N-2 (x index), - J=0..M-2 (y index): - K := T + I*D + J*D*(N-1) +1. create model with ssacreate() +2. add data with one/many ssaaddsequence() calls +3. choose SSA algorithm with one of ssasetalgo...() functions: + * ssasetalgotopkdirect() for direct one-run analysis + * ssasetalgotopkrealtime() for algorithm optimized for many subsequent + runs with warm-start capabilities + * ssasetalgoprecomputed() for user-supplied basis +4. set window width with ssasetwindow() +5. perform one of the analysis-related activities: + a) call ssagetbasis() to get basis + b) call ssaanalyzelast() ssaanalyzesequence() or ssaanalyzelastwindow() + to perform analysis (trend/noise separation) + c) call one of the forecasting functions (ssaforecastlast() or + ssaforecastsequence()) to perform prediction; alternatively, you can + extract linear recurrence coefficients with ssagetlrr(). + SSA analysis will be performed during first call to analysis-related + function. SSA model is smart enough to track all changes in the dataset + and model settings, to cache previously computed basis and to + re-evaluate basis only when necessary. - K-th row stores decomposition for T-th component of the - vector-valued function +Additionally, if your setting involves constant stream of incoming data, +you can perform quick update already calculated model with one of the +incremental append-and-update functions: ssaappendpointandupdate() or +ssaappendsequenceandupdate(). - Tbl[K,0] = X[i] - Tbl[K,1] = X[i+1] - Tbl[K,2] = Y[j] - Tbl[K,3] = Y[j+1] - Tbl[K,4] = C00 - Tbl[K,5] = C01 - Tbl[K,6] = C02 - Tbl[K,7] = C03 - Tbl[K,8] = C10 - Tbl[K,9] = C11 - ... - Tbl[K,19] = C33 - On each grid square spline is equals to: - S(x) = SUM(c[i,j]*(t^i)*(u^j), i=0..3, j=0..3) - t = x-x[j] - u = y-y[i] +NOTE: steps (2), (3), (4) can be performed in arbitrary order. - -- ALGLIB PROJECT -- - Copyright 16.04.2012 by Bochkanov Sergey +INPUT PARAMETERS: + none + +OUTPUT PARAMETERS: + S - structure which stores model state + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    -void alglib::spline2dunpackv(
    -    spline2dinterpolant c,
    -    ae_int_t& m,
    -    ae_int_t& n,
    -    ae_int_t& d,
    -    real_2d_array& tbl);
    +void alglib::ssacreate(
    +    ssamodel& s,
    +    const xparams _params = alglib::xdefault);
    -

    Examples:   [1]  

    - +

    Examples:   [1]  [2]  [3]  
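A minimal end-to-end sketch of the create/add/configure/analyze workflow described above, assuming the SSA unit is pulled in through dataanalysis.h and that ssasetwindow() and ssasetalgotopkdirect() take the model plus a single integer (their exact signatures are not reproduced in this excerpt):

    #include "stdafx.h"
    #include <stdio.h>
    #include "dataanalysis.h"

    using namespace alglib;

    int main(int argc, char **argv)
    {
        // noisy ramp; trend/noise separation should recover something close
        // to the underlying line
        real_1d_array x = "[0.0,0.9,2.1,2.9,4.2,4.8,6.1,7.0,7.9,9.1,9.9,11.2]";

        ssamodel s;
        ssacreate(s);
        ssaaddsequence(s, x);

        // assumed single-integer setters: window width and basis size K
        ssasetwindow(s, 4);          // sliding-window width
        ssasetalgotopkdirect(s, 2);  // direct top-K algorithm, K=2 basis vectors

        // trend/noise separation for the last 8 ticks of the last sequence
        real_1d_array trend, noise;
        ssaanalyzelast(s, 8, trend, noise);
        printf("%s\n", trend.tostring(2).c_str());
        return 0;
    }

Trend and Noise are reallocated by ssaanalyzelast(), and ActualData = Trend + Noise tick by tick.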

    +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    +
    /************************************************************************* +This function builds SSA basis and performs forecasting for a specified +number of ticks, returning value of trend. +Forecast is performed as follows: +* SSA trend extraction is applied to last M sliding windows of the + internally stored dataset +* for each of M sliding windows, M predictions are built +* average value of M predictions is returned -int main(int argc, char **argv) -{ - // - // We use bilinear spline to interpolate f(x,y)=x^2+2*y^2 sampled - // at (x,y) from [0.0, 0.5, 1.0] X [0.0, 1.0]. - // - real_1d_array x = "[0.0, 0.5, 1.0]"; - real_1d_array y = "[0.0, 1.0]"; - real_1d_array f = "[0.00,0.25,1.00,2.00,2.25,3.00]"; - double vx = 0.25; - double vy = 0.50; - double v; - double dx; - double dy; - double dxy; - spline2dinterpolant s; +This function has following running time: +* O(NBasis*WindowWidth*M) for trend extraction phase (always performed) +* O(WindowWidth*NTicks*M) for forecast phase - // build spline - spline2dbuildbicubicv(x, 3, y, 2, f, 1, s); +NOTE: noise reduction is ALWAYS applied by this algorithm; if you want to + apply recurrence relation to raw unprocessed data, use another + function - ssaforecastsequence() which allows to turn on and off + noise reduction phase. - // calculate S(0.25,0.50) - v = spline2dcalc(s, vx, vy); - printf("%.4f\n", double(v)); // EXPECTED: 1.0625 +NOTE: combination of several predictions results in lesser sensitivity to + noise, but it may produce undesirable discontinuities between last + point of the trend and first point of the prediction. The reason is + that last point of the trend is usually corrupted by noise, but + average value of several predictions is less sensitive to noise, + thus discontinuity appears. It is not a bug. - // calculate derivatives - spline2ddiff(s, vx, vy, v, dx, dy, dxy); - printf("%.4f\n", double(v)); // EXPECTED: 1.0625 - printf("%.4f\n", double(dx)); // EXPECTED: 0.5000 - printf("%.4f\n", double(dy)); // EXPECTED: 2.0000 - return 0; -} +INPUT PARAMETERS: + S - SSA model + M - number of sliding windows to combine, M>=1. If + your dataset has less than M sliding windows, this + parameter will be silently reduced. + NTicks - number of ticks to forecast, NTicks>=1 +OUTPUT PARAMETERS: + Trend - array[NTicks], predicted trend line -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
     
    -using namespace alglib;
    +CACHING/REUSE OF THE BASIS
     
    +Caching/reuse of previous results is performed:
    +* first call performs full run of SSA; basis is stored in the cache
    +* subsequent calls reuse previously cached basis
    +* if you call any function which changes model properties (window  length,
    +  algorithm, dataset), internal basis will be invalidated.
    +* the only calls which do NOT invalidate basis are listed below:
    +  a) ssasetwindow() with same window length
    +  b) ssaappendpointandupdate()
    +  c) ssaappendsequenceandupdate()
    +  d) ssasetalgotopk...() with exactly same K
    +  Calling these functions will result in reuse of previously found basis.
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // We use bilinear spline to interpolate f(x,y)=x^2+2*y^2 sampled 
    -    // at (x,y) from [0.0, 0.5, 1.0] X [0.0, 1.0].
    -    //
    -    real_1d_array x = "[0.0, 0.5, 1.0]";
    -    real_1d_array y = "[0.0, 1.0]";
    -    real_1d_array f = "[0.00,0.25,1.00,2.00,2.25,3.00]";
    -    double vx = 0.25;
    -    double vy = 0.50;
    -    double v;
    -    spline2dinterpolant s;
     
    -    // build spline
    -    spline2dbuildbilinearv(x, 3, y, 2, f, 1, s);
    +HANDLING OF DEGENERATE CASES
     
    -    // calculate S(0.25,0.50)
    -    v = spline2dcalc(s, vx, vy);
    -    printf("%.4f\n", double(v)); // EXPECTED: 1.1250
    -    return 0;
    -}
    +Following degenerate cases may happen:
    +* dataset is empty (no analysis can be done)
    +* all sequences are shorter than the window length, no analysis can be done
    +* no algorithm is specified (no analysis can be done)
    +* last sequence is shorter than the WindowWidth   (analysis  can  be done,
    +  but we can not perform forecasting on the last sequence)
    +* window length is 1 (impossible to use for forecasting)
    +* SSA analysis algorithm is  configured  to  extract  basis  whose size is
    +  equal to window length (impossible to use for  forecasting;  only  basis
    +  whose size is less than window length can be used).
     
    +Calling this function in degenerate cases returns following result:
    +* NTicks  copies  of  the  last  value are returned for non-empty task with
    +  large enough dataset, but with overcomplete  basis  (window  width=1  or
    +  basis size is equal to window width)
    +* zero trend with length=NTicks is returned for empty task
     
    -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +No analysis is performed in degenerate cases (we immediately return  dummy
    +values, no basis is ever constructed).
     
    -using namespace alglib;
    +  -- ALGLIB --
    +     Copyright 30.10.2017 by Bochkanov Sergey
    +*************************************************************************/
    +
    +void alglib::ssaforecastavglast(
    +    ssamodel s,
    +    ae_int_t m,
    +    ae_int_t nticks,
    +    real_1d_array& trend,
    +    const xparams _params = alglib::xdefault);
    -
    + +
    +
    /************************************************************************* +This function builds SSA basis and performs forecasting for a user- +specified sequence, returning value of trend. -int main(int argc, char **argv) -{ - // - // We build bilinear spline for f(x,y)=x+2*y for (x,y) in [0,1]. - // Then we apply several transformations to this spline. - // - real_1d_array x = "[0.0, 1.0]"; - real_1d_array y = "[0.0, 1.0]"; - real_1d_array f = "[0.00,1.00,2.00,3.00]"; - spline2dinterpolant s; - spline2dinterpolant snew; - double v; - spline2dbuildbilinearv(x, 2, y, 2, f, 1, s); +Forecasting is done in two stages: +* first, we extract trend from M last sliding windows of the sequence. + This stage is optional, you can turn it off if you pass data which + are already processed with SSA. Of course, you can turn it off even + for raw data, but it is not recommended - noise suppression is very + important for correct prediction. +* then, we apply LRR independently for M sliding windows +* average of M predictions is returned - // copy spline, apply transformation x:=2*xnew, y:=4*ynew - // evaluate at (xnew,ynew) = (0.25,0.25) - should be same as (x,y)=(0.5,1.0) - spline2dcopy(s, snew); - spline2dlintransxy(snew, 2.0, 0.0, 4.0, 0.0); - v = spline2dcalc(snew, 0.25, 0.25); - printf("%.4f\n", double(v)); // EXPECTED: 2.500 +This function has following running time: +* O(NBasis*WindowWidth*M) for trend extraction phase +* O(WindowWidth*NTicks*M) for forecast phase - // copy spline, apply transformation SNew:=2*S+3 - spline2dcopy(s, snew); - spline2dlintransf(snew, 2.0, 3.0); - v = spline2dcalc(snew, 0.5, 1.0); - printf("%.4f\n", double(v)); // EXPECTED: 8.000 +NOTE: combination of several predictions results in lesser sensitivity to + noise, but it may produce undesirable discontinuities between last + point of the trend and first point of the prediction. The reason is + that last point of the trend is usually corrupted by noise, but + average value of several predictions is less sensitive to noise, + thus discontinuity appears. It is not a bug. - // - // Same example, but for vector spline (f0,f1) = {x+2*y, 2*x+y} - // - real_1d_array f2 = "[0.00,0.00, 1.00,2.00, 2.00,1.00, 3.00,3.00]"; - real_1d_array vr; - spline2dbuildbilinearv(x, 2, y, 2, f2, 2, s); +INPUT PARAMETERS: + S - SSA model + Data - array[NTicks], data to forecast + DataLen - number of ticks in the data, DataLen>=1 + M - number of sliding windows to combine, M>=1. If + your dataset has less than M sliding windows, this + parameter will be silently reduced. + ForecastLen - number of ticks to predict, ForecastLen>=1 + ApplySmoothing - whether to apply smoothing trend extraction or not. + if you do not know what to specify, pass true. - // copy spline, apply transformation x:=2*xnew, y:=4*ynew - spline2dcopy(s, snew); - spline2dlintransxy(snew, 2.0, 0.0, 4.0, 0.0); - spline2dcalcv(snew, 0.25, 0.25, vr); - printf("%s\n", vr.tostring(4).c_str()); // EXPECTED: [2.500,2.000] +OUTPUT PARAMETERS: + Trend - array[ForecastLen], forecasted trend - // copy spline, apply transformation SNew:=2*S+3 - spline2dcopy(s, snew); - spline2dlintransf(snew, 2.0, 3.0); - spline2dcalcv(snew, 0.5, 1.0, vr); - printf("%s\n", vr.tostring(4).c_str()); // EXPECTED: [8.000,7.000] - return 0; -} +CACHING/REUSE OF THE BASIS -
    -
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    +Caching/reuse of previous results is performed:
    +* first call performs full run of SSA; basis is stored in the cache
    +* subsequent calls reuse previously cached basis
    +* if you call any function which changes model properties (window  length,
    +  algorithm, dataset), internal basis will be invalidated.
    +* the only calls which do NOT invalidate basis are listed below:
    +  a) ssasetwindow() with same window length
    +  b) ssaappendpointandupdate()
    +  c) ssaappendsequenceandupdate()
    +  d) ssasetalgotopk...() with exactly same K
    +  Calling these functions will result in reuse of previously found basis.
     
    -using namespace alglib;
     
    +HANDLING OF DEGENERATE CASES
     
    -int main(int argc, char **argv)
    -{
    -    //
    -    // We build bilinear spline for f(x,y)=x+2*y+3*xy for (x,y) in [0,1].
    -    // Then we demonstrate how to unpack it.
    -    //
    -    real_1d_array x = "[0.0, 1.0]";
    -    real_1d_array y = "[0.0, 1.0]";
    -    real_1d_array f = "[0.00,1.00,2.00,6.00]";
    -    real_2d_array c;
    -    ae_int_t m;
    -    ae_int_t n;
    -    ae_int_t d;
    -    spline2dinterpolant s;
    +Following degenerate cases may happen:
    +* dataset is empty (no analysis can be done)
    +* all sequences are shorter than the window length, no analysis can be done
    +* no algorithm is specified (no analysis can be done)
    +* data sequence is shorter than the WindowWidth   (analysis  can  be done,
    +  but we can not perform forecasting on the last sequence)
    +* window length is 1 (impossible to use for forecasting)
    +* SSA analysis algorithm is  configured  to  extract  basis  whose size is
    +  equal to window length (impossible to use for  forecasting;  only  basis
    +  whose size is less than window length can be used).
     
    -    // build spline
    -    spline2dbuildbilinearv(x, 2, y, 2, f, 1, s);
    +Calling this function in degenerate cases returns following result:
    +* ForecastLen copies of the last value are returned for non-empty task with
    +  large enough dataset, but with overcomplete  basis  (window  width=1  or
    +  basis size is equal to window width)
    +* zero trend with length=ForecastLen is returned for empty task
     
    -    // unpack and test
    -    spline2dunpackv(s, m, n, d, c);
    -    printf("%s\n", c.tostring(4).c_str()); // EXPECTED: [[0, 1, 0, 1, 0,2,0,0, 1,3,0,0, 0,0,0,0, 0,0,0,0 ]]
    -    return 0;
    -}
    +No analysis is performed in degenerate cases (we immediately return  dummy
    +values, no basis is ever constructed).
     
    +  -- ALGLIB --
    +     Copyright 30.10.2017 by Bochkanov Sergey
    +*************************************************************************/
    +
    +void alglib::ssaforecastavgsequence(
    +    ssamodel s,
    +    real_1d_array data,
    +    ae_int_t m,
    +    ae_int_t forecastlen,
    +    real_1d_array& trend,
    +    const xparams _params = alglib::xdefault);
    +void alglib::ssaforecastavgsequence(
    +    ssamodel s,
    +    real_1d_array data,
    +    ae_int_t datalen,
    +    ae_int_t m,
    +    ae_int_t forecastlen,
    +    bool applysmoothing,
    +    real_1d_array& trend,
    +    const xparams _params = alglib::xdefault);
    -
    + +
    -#include "stdafx.h"
    -#include <stdlib.h>
    -#include <stdio.h>
    -#include <math.h>
    -#include "interpolation.h"
    -
    -using namespace alglib;
    +
    /************************************************************************* +This function builds SSA basis and performs forecasting for a specified +number of ticks, returning value of trend. +Forecast is performed as follows: +* SSA trend extraction is applied to last WindowWidth elements of the + internally stored dataset; this step is basically a noise reduction. +* linear recurrence relation is applied to extracted trend -int main(int argc, char **argv) -{ - // - // We build bilinear vector-valued spline (f0,f1) = {x+2*y, 2*x+y} - // Spline is built using function values at 2x2 grid: (x,y)=[0,1]*[0,1] - // Then we perform evaluation at (x,y)=(0.1,0.3) - // - real_1d_array x = "[0.0, 1.0]"; - real_1d_array y = "[0.0, 1.0]"; - real_1d_array f = "[0.00,0.00, 1.00,2.00, 2.00,1.00, 3.00,3.00]"; - spline2dinterpolant s; - real_1d_array vr; - spline2dbuildbilinearv(x, 2, y, 2, f, 2, s); - spline2dcalcv(s, 0.1, 0.3, vr); - printf("%s\n", vr.tostring(4).c_str()); // EXPECTED: [0.700,0.500] - return 0; -} +This function has following running time: +* O(NBasis*WindowWidth) for trend extraction phase (always performed) +* O(WindowWidth*NTicks) for forecast phase +NOTE: noise reduction is ALWAYS applied by this algorithm; if you want to + apply recurrence relation to raw unprocessed data, use another + function - ssaforecastsequence() which allows to turn on and off + noise reduction phase. -
    -
    - -spline3dinterpolant
    - -spline3dbuildtrilinearv
    -spline3dcalc
    -spline3dcalcv
    -spline3dcalcvbuf
    -spline3dlintransf
    -spline3dlintransxyz
    -spline3dresampletrilinear
    -spline3dunpackv
    - - - - -
    spline3d_trilinear Trilinear spline interpolation
    spline3d_vector Vector-valued trilinear spline interpolation
    - -
    -
    /************************************************************************* -3-dimensional spline inteprolant +NOTE: this algorithm performs prediction using only one - last - sliding + window. Predictions produced by such approach are smooth + continuations of the reconstructed trend line, but they can be + easily corrupted by noise. If you need noise-resistant prediction, + use ssaforecastavglast() function, which averages predictions built + using several sliding windows. + +INPUT PARAMETERS: + S - SSA model + NTicks - number of ticks to forecast, NTicks>=1 + +OUTPUT PARAMETERS: + Trend - array[NTicks], predicted trend line + + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + + +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* last sequence is shorter than the WindowWidth (analysis can be done, + but we can not perform forecasting on the last sequence) +* window lentgh is 1 (impossible to use for forecasting) +* SSA analysis algorithm is configured to extract basis whose size is + equal to window length (impossible to use for forecasting; only basis + whose size is less than window length can be used). + +Calling this function in degenerate cases returns following result: +* NTicks copies of the last value is returned for non-empty task with + large enough dataset, but with overcomplete basis (window width=1 or + basis size is equal to window width) +* zero trend with length=NTicks is returned for empty task + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is ever constructed). + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    -class spline3dinterpolant
    -{
    -};
    +void alglib::ssaforecastlast(
    +    ssamodel s,
    +    ae_int_t nticks,
    +    real_1d_array& trend,
    +    const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  
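Forecasting plugs into the same configured model; a brief fragment continuing the sketch given near ssacreate() above, contrasting the single-window and the averaged forecasts:

    real_1d_array forecast1, forecast2;

    // single-window forecast: smooth continuation, more sensitive to noise
    ssaforecastlast(s, 3, forecast1);

    // forecast averaged over 5 sliding windows: rougher join, more robust
    ssaforecastavglast(s, 5, 3, forecast2);

    printf("%s\n", forecast1.tostring(2).c_str());
    printf("%s\n", forecast2.tostring(2).c_str());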

    +
     
    /************************************************************************* -This subroutine builds trilinear vector-valued spline. +This function builds SSA basis and performs forecasting for a user- +specified sequence, returning value of trend. + +Forecasting is done in two stages: +* first, we extract trend from the WindowWidth last elements of the + sequence. This stage is optional, you can turn it off if you pass + data which are already processed with SSA. Of course, you can turn it + off even for raw data, but it is not recommended - noise suppression is + very important for correct prediction. +* then, we apply LRR for last WindowWidth-1 elements of the extracted + trend. + +This function has following running time: +* O(NBasis*WindowWidth) for trend extraction phase +* O(WindowWidth*NTicks) for forecast phase + +NOTE: this algorithm performs prediction using only one - last - sliding + window. Predictions produced by such approach are smooth + continuations of the reconstructed trend line, but they can be + easily corrupted by noise. If you need noise-resistant prediction, + use ssaforecastavgsequence() function, which averages predictions + built using several sliding windows. INPUT PARAMETERS: - X - spline abscissas, array[0..N-1] - Y - spline ordinates, array[0..M-1] - Z - spline applicates, array[0..L-1] - F - function values, array[0..M*N*L*D-1]: - * first D elements store D values at (X[0],Y[0],Z[0]) - * next D elements store D values at (X[1],Y[0],Z[0]) - * next D elements store D values at (X[2],Y[0],Z[0]) - * ... - * next D elements store D values at (X[0],Y[1],Z[0]) - * next D elements store D values at (X[1],Y[1],Z[0]) - * next D elements store D values at (X[2],Y[1],Z[0]) - * ... - * next D elements store D values at (X[0],Y[0],Z[1]) - * next D elements store D values at (X[1],Y[0],Z[1]) - * next D elements store D values at (X[2],Y[0],Z[1]) - * ... - * general form - D function values at (X[i],Y[j]) are stored - at F[D*(N*(M*K+J)+I)...D*(N*(M*K+J)+I)+D-1]. - M,N, - L - grid size, M>=2, N>=2, L>=2 - D - vector dimension, D>=1 + S - SSA model + Data - array[NTicks], data to forecast + DataLen - number of ticks in the data, DataLen>=1 + ForecastLen - number of ticks to predict, ForecastLen>=1 + ApplySmoothing - whether to apply smoothing trend extraction or not; + if you do not know what to specify, pass True. OUTPUT PARAMETERS: - C - spline interpolant + Trend - array[ForecastLen], forecasted trend - -- ALGLIB PROJECT -- - Copyright 26.04.2012 by Bochkanov Sergey + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. 
+ + +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* data sequence is shorter than the WindowWidth (analysis can be done, + but we can not perform forecasting on the last sequence) +* window lentgh is 1 (impossible to use for forecasting) +* SSA analysis algorithm is configured to extract basis whose size is + equal to window length (impossible to use for forecasting; only basis + whose size is less than window length can be used). + +Calling this function in degenerate cases returns following result: +* ForecastLen copies of the last value is returned for non-empty task with + large enough dataset, but with overcomplete basis (window width=1 or + basis size is equal to window width) +* zero trend with length=ForecastLen is returned for empty task + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is ever constructed). + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline3dbuildtrilinearv( - real_1d_array x, - ae_int_t n, - real_1d_array y, - ae_int_t m, - real_1d_array z, - ae_int_t l, - real_1d_array f, - ae_int_t d, - spline3dinterpolant& c); +
    void alglib::ssaforecastsequence( + ssamodel s, + real_1d_array data, + ae_int_t forecastlen, + real_1d_array& trend, + const xparams _params = alglib::xdefault); +void alglib::ssaforecastsequence( + ssamodel s, + real_1d_array data, + ae_int_t datalen, + ae_int_t forecastlen, + bool applysmoothing, + real_1d_array& trend, + const xparams _params = alglib::xdefault);
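
For orientation, a minimal usage sketch of the short ssaforecastsequence() overload (smoothing/trend extraction on by default). The model setup mirrors the SSA examples further below; the numeric values are made up for illustration and no particular output is implied.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main()
{
    // Build an SSA model on a short, roughly linear training sequence
    ssamodel s;
    real_1d_array xtrain = "[0.1,1.0,2.1,2.9,4.0,5.1,6.0,7.1]";
    ssacreate(s);
    ssasetwindow(s, 3);
    ssaaddsequence(s, xtrain);
    ssasetalgotopkdirect(s, 2);

    // Forecast 3 ticks ahead for a different, user-specified sequence;
    // the short overload uses DataLen=length(data) and ApplySmoothing=true.
    real_1d_array data = "[10.0,11.1,11.9,13.0,14.1]";
    real_1d_array trend;
    ssaforecastsequence(s, data, 3, trend);
    printf("%s\n", trend.tostring(2).c_str());
    return 0;
}
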
    -

    Examples:   [1]  [2]  

    - +

    Examples:   [1]  [2]  

    +
     
/************************************************************************* -This subroutine calculates the value of the trilinear or tricubic spline at -the given point (X,Y,Z). +This function executes SSA on internally stored dataset and returns basis +found by current method. INPUT PARAMETERS: - C - coefficients table. - Built by BuildBilinearSpline or BuildBicubicSpline. - X, Y, - Z - point + S - SSA model -Result: - S(x,y,z) +OUTPUT PARAMETERS: + A - array[WindowWidth,NBasis], basis; vectors are + stored in matrix columns, by decreasing variance + SV - array[NBasis]: + * zeros - for model initialized with SSASetAlgoPrecomputed() + * singular values - for other algorithms + WindowWidth - current window width + NBasis - basis size - -- ALGLIB PROJECT -- - Copyright 26.04.2012 by Bochkanov Sergey + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + + +HANDLING OF DEGENERATE CASES + +Calling this function in degenerate cases (no data or all data are +shorter than window size; no algorithm is specified) returns basis with +just one zero vector. + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    double alglib::spline3dcalc( - spline3dinterpolant c, - double x, - double y, - double z); +
    void alglib::ssagetbasis( + ssamodel s, + real_2d_array& a, + real_1d_array& sv, + ae_int_t& windowwidth, + ae_int_t& nbasis, + const xparams _params = alglib::xdefault);
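
A minimal sketch of retrieving the cached basis right after setting up a model (a longer, incremental example of ssagetbasis() appears further below); the dataset values are illustrative only.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main()
{
    ssamodel s;
    real_1d_array x = "[0,0.5,1,1,1.5,2]";
    ssacreate(s);
    ssasetwindow(s, 3);
    ssaaddsequence(s, x);
    ssasetalgotopkdirect(s, 2);

    // First call runs the full SSA analysis and caches the basis;
    // A holds basis vectors in columns, SV the corresponding singular values.
    real_2d_array a;
    real_1d_array sv;
    ae_int_t w, nbasis;
    ssagetbasis(s, a, sv, w, nbasis);
    printf("window=%d, nbasis=%d\n", (int)w, (int)nbasis);
    printf("%s\n", a.tostring(3).c_str());
    return 0;
}
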
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This subroutine calculates trilinear or tricubic vector-valued spline at the -given point (X,Y,Z). +This function returns linear recurrence relation (LRR) coefficients found +by current SSA algorithm. INPUT PARAMETERS: - C - spline interpolant. - X, Y, - Z - point + S - SSA model OUTPUT PARAMETERS: - F - array[D] which stores function values. F is out-parameter and - it is reallocated after call to this function. In case you - want to reuse previously allocated F, you may use - Spline2DCalcVBuf(), which reallocates F only when it is too - small. + A - array[WindowWidth-1]. Coefficients of the + linear recurrence of the form: + X[W-1] = X[W-2]*A[W-2] + X[W-3]*A[W-3] + ... + X[0]*A[0]. + Empty array for WindowWidth=1. + WindowWidth - current window width - -- ALGLIB PROJECT -- - Copyright 26.04.2012 by Bochkanov Sergey + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + + +HANDLING OF DEGENERATE CASES + +Calling this function in degenerate cases (no data or all data are +shorter than window size; no algorithm is specified) returns zeros. + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline3dcalcv( - spline3dinterpolant c, - double x, - double y, - double z, - real_1d_array& f); +
    void alglib::ssagetlrr( + ssamodel s, + real_1d_array& a, + ae_int_t& windowwidth, + const xparams _params = alglib::xdefault);
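
The recurrence above can, in principle, be applied by hand; the sketch below fits a model, pulls the LRR out and predicts one value past the end of the sequence. It only illustrates the indexing convention (A[W-2] multiplies the most recent tick); for real forecasting the ssaforecast...() functions remain the intended path, and the data here is illustrative.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main()
{
    // Fit SSA to a simple, nearly linear sequence
    ssamodel s;
    real_1d_array x = "[0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0]";
    ssacreate(s);
    ssasetwindow(s, 3);
    ssaaddsequence(s, x);
    ssasetalgotopkdirect(s, 2);

    // Retrieve LRR coefficients A[0..W-2] and the current window width
    real_1d_array a;
    ae_int_t w;
    ssagetlrr(s, a, w);

    // Apply X[t] = A[W-2]*X[t-1] + ... + A[0]*X[t-W+1] once to predict
    // the value following the last observed tick.
    double next = 0.0;
    for(ae_int_t j = 0; j < w - 1; j++)
        next += a[j] * x[x.length() - (w - 1) + j];
    printf("predicted next value: %.2f\n", next);
    return 0;
}
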
    -

    Examples:   [1]  

    - +
     
    /************************************************************************* -This subroutine calculates bilinear or bicubic vector-valued spline at the -given point (X,Y,Z). +This function sets SSA algorithm to "precomputed vectors" algorithm. + +This algorithm uses precomputed set of orthonormal (orthogonal AND +normalized) basis vectors supplied by user. Thus, basis calculation phase +is not performed - we already have our basis - and only analysis/ +forecasting phase requires actual calculations. + +This algorithm may handle "append" requests which add just one/few ticks +to the end of the last sequence in O(1) time. + +NOTE: this algorithm accepts both basis and window width, because these + two parameters are naturally aligned. Calling this function sets + window width; if you call ssasetwindow() with other window width, + then during analysis stage algorithm will detect conflict and reset + to zero basis. INPUT PARAMETERS: - C - spline interpolant. - X, Y, - Z - point - F - output buffer, possibly preallocated array. In case array size - is large enough to store result, it is not reallocated. Array - which is too short will be reallocated + S - SSA model + A - array[WindowWidth,NBasis], orthonormalized basis; + this function does NOT control orthogonality and + does NOT perform any kind of renormalization. It + is your responsibility to provide it with correct + basis. + WindowWidth - window width, >=1 + NBasis - number of basis vectors, 1<=NBasis<=WindowWidth OUTPUT PARAMETERS: - F - array[D] (or larger) which stores function values + S - updated model - -- ALGLIB PROJECT -- - Copyright 26.04.2012 by Bochkanov Sergey +NOTE: calling this function invalidates basis in all cases. + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline3dcalcvbuf( - spline3dinterpolant c, - double x, - double y, - double z, - real_1d_array& f); +
    void alglib::ssasetalgoprecomputed( + ssamodel s, + real_2d_array a, + const xparams _params = alglib::xdefault); +void alglib::ssasetalgoprecomputed( + ssamodel s, + real_2d_array a, + ae_int_t windowwidth, + ae_int_t nbasis, + const xparams _params = alglib::xdefault);
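
A minimal sketch of the precomputed-basis mode using the full overload (which also sets the window width). The single 3-component column below is just an illustrative unit vector, not the output of any real computation, and as noted above ALGLIB does not verify its orthonormality.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"

using namespace alglib;

int main()
{
    // One unit-length basis vector stored as the only column of a
    // WindowWidth x NBasis matrix (WindowWidth=3, NBasis=1).
    real_2d_array basis = "[[0.577350],[0.577350],[0.577350]]";

    ssamodel s;
    real_1d_array x = "[0,0.5,1,1,1.5,2]";
    ssacreate(s);
    ssaaddsequence(s, x);
    ssasetalgoprecomputed(s, basis, 3, 1);

    // Analysis now skips the basis-calculation phase entirely and only
    // performs reconstruction of trend/noise with the supplied basis.
    real_1d_array trend, noise;
    ssaanalyzesequence(s, x, trend, noise);
    printf("%s\n", trend.tostring(2).c_str());
    return 0;
}
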
    - +
     
    /************************************************************************* -This subroutine performs linear transformation of the spline. +This function sets SSA algorithm to "direct top-K" algorithm. + +"Direct top-K" algorithm performs full SVD of the N*WINDOW trajectory +matrix (hence its name - direct solver is used), then extracts top K +components. Overall running time is O(N*WINDOW^2), where N is a number of +ticks in the dataset, WINDOW is window width. + +This algorithm may handle "append" requests which add just one/few ticks +to the end of the last sequence in O(WINDOW^3) time, which is ~N/WINDOW +times faster than re-computing everything from scratch. INPUT PARAMETERS: - C - spline interpolant. - A, B- transformation coefficients: S2(x,y) = A*S(x,y,z) + B + S - SSA model + TopK - number of components to analyze; TopK>=1. OUTPUT PARAMETERS: - C - transformed spline + S - updated model - -- ALGLIB PROJECT -- - Copyright 26.04.2012 by Bochkanov Sergey + +NOTE: TopK>WindowWidth is silently decreased to WindowWidth during analysis + phase + +NOTE: calling this function invalidates basis, except for the situation + when this algorithm was already set with same parameters. + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline3dlintransf(spline3dinterpolant c, double a, double b); +
    void alglib::ssasetalgotopkdirect( + ssamodel s, + ae_int_t topk, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  

    +
     
    /************************************************************************* -This subroutine performs linear transformation of the spline argument. +This function sets SSA algorithm to "top-K real time algorithm". This algo +extracts K components with largest singular values. + +It is real-time version of top-K algorithm which is optimized for +incremental processing and fast start-up. Internally it uses subspace +eigensolver for truncated SVD. It results in ability to perform quick +updates of the basis when only a few points/sequences is added to dataset. + +Performance profile of the algorithm is given below: +* O(K*WindowWidth^2) running time for incremental update of the dataset + with one of the "append-and-update" functions (ssaappendpointandupdate() + or ssaappendsequenceandupdate()). +* O(N*WindowWidth^2) running time for initial basis evaluation (N=size of + dataset) +* ability to split costly initialization across several incremental + updates of the basis (so called "Power-Up" functionality, activated by + ssasetpoweruplength() function) INPUT PARAMETERS: - C - spline interpolant - AX, BX - transformation coefficients: x = A*u + B - AY, BY - transformation coefficients: y = A*v + B - AZ, BZ - transformation coefficients: z = A*w + B + S - SSA model + TopK - number of components to analyze; TopK>=1. OUTPUT PARAMETERS: - C - transformed spline + S - updated model - -- ALGLIB PROJECT -- - Copyright 26.04.2012 by Bochkanov Sergey +NOTE: this algorithm is optimized for large-scale tasks with large + datasets. On toy problems with just 5-10 points it can return basis + which is slightly different from that returned by direct algorithm + (ssasetalgotopkdirect() function). However, the difference becomes + negligible as dataset grows. + +NOTE: TopK>WindowWidth is silently decreased to WindowWidth during analysis + phase + +NOTE: calling this function invalidates basis, except for the situation + when this algorithm was already set with same parameters. + + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline3dlintransxyz( - spline3dinterpolant c, - double ax, - double bx, - double ay, - double by, - double az, - double bz); +
    void alglib::ssasetalgotopkrealtime( + ssamodel s, + ae_int_t topk, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
/************************************************************************* -Trilinear spline resampling +This function sets memory limit of SSA analysis. + +Straightforward SSA with sequence length T and window width W needs O(T*W) +memory. It is possible to reduce memory consumption by splitting task into +smaller chunks. + +This function allows you to specify approximate memory limit (measured in +double precision numbers used for buffers). Actual memory consumption will +be comparable to the number specified by you. + +Default memory limit is 50,000,000 doubles (about 400 MBytes) in the current version. INPUT PARAMETERS: - A - array[0..OldXCount*OldYCount*OldZCount-1], function - values at the old grid, : - A[0] x=0,y=0,z=0 - A[1] x=1,y=0,z=0 - A[..] ... - A[..] x=oldxcount-1,y=0,z=0 - A[..] x=0,y=1,z=0 - A[..] ... - ... - OldZCount - old Z-count, OldZCount>1 - OldYCount - old Y-count, OldYCount>1 - OldXCount - old X-count, OldXCount>1 - NewZCount - new Z-count, NewZCount>1 - NewYCount - new Y-count, NewYCount>1 - NewXCount - new X-count, NewXCount>1 + S - SSA model + MemLimit- memory limit, >=0. Zero value means no limit. -OUTPUT PARAMETERS: - B - array[0..NewXCount*NewYCount*NewZCount-1], function - values at the new grid: - B[0] x=0,y=0,z=0 - B[1] x=1,y=0,z=0 - B[..] ... - B[..] x=newxcount-1,y=0,z=0 - B[..] x=0,y=1,z=0 - B[..] ... - ... + -- ALGLIB -- + Copyright 20.12.2017 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::ssasetmemorylimit( + ssamodel s, + ae_int_t memlimit, + const xparams _params = alglib::xdefault); - -- ALGLIB routine -- - 26.04.2012 - Copyright by Bochkanov Sergey +
    + +
    +
    /************************************************************************* +This function sets length of power-up cycle for real-time algorithm. + +By default, this algorithm performs costly O(N*WindowWidth^2) init phase +followed by full run of truncated EVD. However, if you are ready to +live with a bit lower-quality basis during first few iterations, you can +split this O(N*WindowWidth^2) initialization between several subsequent +append-and-update rounds. It results in better latency of the algorithm. + +This function invalidates basis/solver, next analysis call will result in +full recalculation of everything. + +INPUT PARAMETERS: + S - SSA model + PWLen - length of the power-up stage: + * 0 means that no power-up is requested + * 1 is the same as 0 + * >1 means that delayed power-up is performed + + -- ALGLIB -- + Copyright 03.11.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline3dresampletrilinear( - real_1d_array a, - ae_int_t oldzcount, - ae_int_t oldycount, - ae_int_t oldxcount, - ae_int_t newzcount, - ae_int_t newycount, - ae_int_t newxcount, - real_1d_array& b); +
    void alglib::ssasetpoweruplength( + ssamodel s, + ae_int_t pwlen, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  

    +
     
/************************************************************************* -This subroutine unpacks tri-dimensional spline into the coefficients table +This function sets seed which is used to initialize internal RNG when +we make pseudorandom decisions on model updates. + +By default, deterministic seed is used - which results in same sequence of +pseudorandom decisions every time you run SSA model. If you specify non- +deterministic seed value, then SSA model may return slightly different +results after each run. + +This function can be useful when you have several SSA models updated with +ssaappendpointandupdate() called with 0<UpdateIts<1 (fractional value) and +due to performance limitations want them to perform updates at different +moments. INPUT PARAMETERS: - C - spline interpolant. + S - SSA model + Seed - seed: + * positive values = use deterministic seed for each run of + algorithms which depend on random initialization + * zero or negative values = use non-deterministic seed -Result: - N - grid size (X) - M - grid size (Y) - L - grid size (Z) - D - number of components - SType- spline type. Currently, only one spline type is supported: - trilinear spline, as indicated by SType=1. - Tbl - spline coefficients: [0..(N-1)*(M-1)*(L-1)*D-1, 0..13]. - For T=0..D-1 (component index), I = 0...N-2 (x index), - J=0..M-2 (y index), K=0..L-2 (z index): - Q := T + I*D + J*D*(N-1) + K*D*(N-1)*(M-1), + -- ALGLIB -- + Copyright 03.11.2017 by Bochkanov Sergey +*************************************************************************/ +
    void alglib::ssasetseed( + ssamodel s, + ae_int_t seed, + const xparams _params = alglib::xdefault); - Q-th row stores decomposition for T-th component of the - vector-valued function +
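
A small sketch of the scenario described above: two models fed the same stream with fractional UpdateIts, each given its own RNG behaviour so that their probabilistic basis updates tend not to coincide. Purely illustrative; no output is produced.

#include "stdafx.h"
#include "dataanalysis.h"

using namespace alglib;

int main()
{
    real_1d_array x = "[0.0,1.0,2.0,3.0,4.0,5.0]";

    ssamodel s1, s2;
    ssacreate(s1);
    ssasetwindow(s1, 3);
    ssaaddsequence(s1, x);
    ssasetalgotopkrealtime(s1, 1);
    ssasetseed(s1, 0);              // zero => non-deterministic seed

    ssacreate(s2);
    ssasetwindow(s2, 3);
    ssaaddsequence(s2, x);
    ssasetalgotopkrealtime(s2, 1);
    ssasetseed(s2, 0);              // second model gets its own RNG state

    // With UpdateIts=0.5 each append triggers a basis update only with
    // ~50% probability, so the two models tend to refresh at different ticks.
    ssaappendpointandupdate(s1, 6.0, 0.5);
    ssaappendpointandupdate(s2, 6.0, 0.5);
    return 0;
}
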
    + +
    +
    /************************************************************************* +This function sets window width for SSA model. You should call it before +analysis phase. Default window width is 1 (not for real use). - Tbl[Q,0] = X[i] - Tbl[Q,1] = X[i+1] - Tbl[Q,2] = Y[j] - Tbl[Q,3] = Y[j+1] - Tbl[Q,4] = Z[k] - Tbl[Q,5] = Z[k+1] +Special notes: +* this function call can be performed at any moment before first call to + analysis-related functions +* changing window width invalidates internally stored basis; if you change + window width AFTER you call analysis-related function, next analysis + phase will require re-calculation of the basis according to current + algorithm. +* calling this function with exactly same window width as current one has + no effect +* if you specify window width larger than any data sequence stored in the + model, analysis will return zero basis. - Tbl[Q,6] = C000 - Tbl[Q,7] = C100 - Tbl[Q,8] = C010 - Tbl[Q,9] = C110 - Tbl[Q,10]= C001 - Tbl[Q,11]= C101 - Tbl[Q,12]= C011 - Tbl[Q,13]= C111 - On each grid square spline is equals to: - S(x) = SUM(c[i,j,k]*(x^i)*(y^j)*(z^k), i=0..1, j=0..1, k=0..1) - t = x-x[j] - u = y-y[i] - v = z-z[k] +INPUT PARAMETERS: + S - SSA model created with ssacreate() + WindowWidth - >=1, new window width - NOTE: format of Tbl is given for SType=1. Future versions of - ALGLIB can use different formats for different values of - SType. +OUTPUT PARAMETERS: + S - SSA model, updated - -- ALGLIB PROJECT -- - Copyright 26.04.2012 by Bochkanov Sergey + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -
    void alglib::spline3dunpackv( - spline3dinterpolant c, - ae_int_t& n, - ae_int_t& m, - ae_int_t& l, - ae_int_t& d, - ae_int_t& stype, - real_2d_array& tbl); +
    void alglib::ssasetwindow( + ssamodel s, + ae_int_t windowwidth, + const xparams _params = alglib::xdefault);
    - +

    Examples:   [1]  [2]  [3]  

    +
     #include "stdafx.h"
     #include <stdlib.h>
     #include <stdio.h>
     #include <math.h>
    -#include "interpolation.h"
    +#include "dataanalysis.h"
     
     using namespace alglib;
     
    @@ -45032,47 +57247,53 @@
     int main(int argc, char **argv)
     {
         //
    -    // We use trilinear spline to interpolate f(x,y,z)=x+xy+z sampled 
    -    // at (x,y,z) from [0.0, 1.0] X [0.0, 1.0] X [0.0, 1.0].
    -    //
    -    // We store x, y and z-values at local arrays with same names.
    -    // Function values are stored in the array F as follows:
    -    //     f[0]     (x,y,z) = (0,0,0)
    -    //     f[1]     (x,y,z) = (1,0,0)
    -    //     f[2]     (x,y,z) = (0,1,0)
    -    //     f[3]     (x,y,z) = (1,1,0)
    -    //     f[4]     (x,y,z) = (0,0,1)
    -    //     f[5]     (x,y,z) = (1,0,1)
    -    //     f[6]     (x,y,z) = (0,1,1)
    -    //     f[7]     (x,y,z) = (1,1,1)
    +    // Here we demonstrate SSA trend/noise separation for some toy problem:
    +    // small monotonically growing series X are analyzed with 3-tick window
    +    // and "top-K" version of SSA, which selects K largest singular vectors
    +    // for analysis, with K=1.
         //
    -    real_1d_array x = "[0.0, 1.0]";
    -    real_1d_array y = "[0.0, 1.0]";
    -    real_1d_array z = "[0.0, 1.0]";
    -    real_1d_array f = "[0,1,0,2,1,2,1,3]";
    -    double vx = 0.50;
    -    double vy = 0.50;
    -    double vz = 0.50;
    -    double v;
    -    spline3dinterpolant s;
    +    ssamodel s;
    +    real_1d_array x = "[0,0.5,1,1,1.5,2]";
     
    -    // build spline
    -    spline3dbuildtrilinearv(x, 2, y, 2, z, 2, f, 1, s);
    +    //
    +    // First, we create SSA model, set its properties and add dataset.
    +    //
    +    // We use window with width=3 and configure model to use direct SSA
    +    // algorithm - one which runs exact O(N*W^2) analysis - to extract
+    // one top singular vector. Well, it is a toy problem :)
    +    //
    +    // NOTE: SSA model may store and analyze more than one sequence
    +    //       (say, different sequences may correspond to data collected
    +    //       from different devices)
    +    //
    +    ssacreate(s);
    +    ssasetwindow(s, 3);
    +    ssaaddsequence(s, x);
    +    ssasetalgotopkdirect(s, 1);
     
    -    // calculate S(0.5,0.5,0.5)
    -    v = spline3dcalc(s, vx, vy, vz);
    -    printf("%.4f\n", double(v)); // EXPECTED: 1.2500
    +    //
    +    // Now we begin analysis. Internally SSA model stores everything it needs:
    +    // data, settings, solvers and so on. Right after first call to analysis-
    +    // related function it will analyze dataset, build basis and perform analysis.
    +    //
    +    // Subsequent calls to analysis functions will reuse previously computed
    +    // basis, unless you invalidate it by changing model settings (or dataset).
    +    //
    +    real_1d_array trend;
    +    real_1d_array noise;
    +    ssaanalyzesequence(s, x, trend, noise);
    +    printf("%s\n", trend.tostring(2).c_str()); // EXPECTED: [0.3815,0.5582,0.7810,1.0794,1.5041,2.0105]
         return 0;
     }
     
     
    -
    +
     #include "stdafx.h"
     #include <stdlib.h>
     #include <stdio.h>
     #include <math.h>
    -#include "interpolation.h"
    +#include "dataanalysis.h"
     
     using namespace alglib;
     
    @@ -45080,44 +57301,160 @@
     int main(int argc, char **argv)
     {
         //
    -    // We use trilinear vector-valued spline to interpolate {f0,f1}={x+xy+z,x+xy+yz+z}
    -    // sampled at (x,y,z) from [0.0, 1.0] X [0.0, 1.0] X [0.0, 1.0].
    +    // Here we demonstrate SSA forecasting on some toy problem with clearly
    +    // visible linear trend and small amount of noise.
         //
    -    // We store x, y and z-values at local arrays with same names.
    -    // Function values are stored in the array F as follows:
    -    //     f[0]     f0, (x,y,z) = (0,0,0)
    -    //     f[1]     f1, (x,y,z) = (0,0,0)
    -    //     f[2]     f0, (x,y,z) = (1,0,0)
    -    //     f[3]     f1, (x,y,z) = (1,0,0)
    -    //     f[4]     f0, (x,y,z) = (0,1,0)
    -    //     f[5]     f1, (x,y,z) = (0,1,0)
    -    //     f[6]     f0, (x,y,z) = (1,1,0)
    -    //     f[7]     f1, (x,y,z) = (1,1,0)
    -    //     f[8]     f0, (x,y,z) = (0,0,1)
    -    //     f[9]     f1, (x,y,z) = (0,0,1)
    -    //     f[10]    f0, (x,y,z) = (1,0,1)
    -    //     f[11]    f1, (x,y,z) = (1,0,1)
    -    //     f[12]    f0, (x,y,z) = (0,1,1)
    -    //     f[13]    f1, (x,y,z) = (0,1,1)
    -    //     f[14]    f0, (x,y,z) = (1,1,1)
    -    //     f[15]    f1, (x,y,z) = (1,1,1)
    +    ssamodel s;
    +    real_1d_array x = "[0.05,0.96,2.04,3.11,3.97,5.03,5.98,7.02,8.02]";
    +
         //
    -    real_1d_array x = "[0.0, 1.0]";
    -    real_1d_array y = "[0.0, 1.0]";
    -    real_1d_array z = "[0.0, 1.0]";
    -    real_1d_array f = "[0,0, 1,1, 0,0, 2,2, 1,1, 2,2, 1,2, 3,4]";
    -    double vx = 0.50;
    -    double vy = 0.50;
    -    double vz = 0.50;
    -    spline3dinterpolant s;
    +    // First, we create SSA model, set its properties and add dataset.
    +    //
    +    // We use window with width=3 and configure model to use direct SSA
    +    // algorithm - one which runs exact O(N*W^2) analysis - to extract
+    // two top singular vectors. Well, it is a toy problem :)
    +    //
    +    // NOTE: SSA model may store and analyze more than one sequence
    +    //       (say, different sequences may correspond to data collected
    +    //       from different devices)
    +    //
    +    ssacreate(s);
    +    ssasetwindow(s, 3);
    +    ssaaddsequence(s, x);
    +    ssasetalgotopkdirect(s, 2);
     
    -    // build spline
    -    spline3dbuildtrilinearv(x, 2, y, 2, z, 2, f, 2, s);
    +    //
    +    // Now we begin analysis. Internally SSA model stores everything it needs:
    +    // data, settings, solvers and so on. Right after first call to analysis-
    +    // related function it will analyze dataset, build basis and perform analysis.
    +    //
    +    // Subsequent calls to analysis functions will reuse previously computed
    +    // basis, unless you invalidate it by changing model settings (or dataset).
    +    //
+    // In this example we show how to use the ssaforecastlast() function, which
+    // predicts changes in the last sequence of the dataset. If you want to
    +    // perform prediction for some other sequence, use ssaforecastsequence().
    +    //
    +    real_1d_array trend;
    +    ssaforecastlast(s, 3, trend);
     
    -    // calculate S(0.5,0.5,0.5) - we have vector of values instead of single value
    -    real_1d_array v;
    -    spline3dcalcv(s, vx, vy, vz, v);
    -    printf("%s\n", v.tostring(4).c_str()); // EXPECTED: [1.2500,1.5000]
    +    //
+    // Well, we expected it to be [9,10,11]. There is some difference,
    +    // which can be explained by the artificial noise in the dataset.
    +    //
    +    printf("%s\n", trend.tostring(2).c_str()); // EXPECTED: [9.0005,9.9322,10.8051]
    +    return 0;
    +}
    +
    +
    +
    +
    +#include "stdafx.h"
    +#include <stdlib.h>
    +#include <stdio.h>
    +#include <math.h>
    +#include "dataanalysis.h"
    +
    +using namespace alglib;
    +
    +
    +int main(int argc, char **argv)
    +{
    +    //
    +    // Suppose that you have a constant stream of incoming data, and you want
    +    // to regularly perform singular spectral analysis of this stream.
    +    //
    +    // One full run of direct algorithm costs O(N*Width^2) operations, so
    +    // the more points you have, the more it costs to rebuild basis from
    +    // scratch.
    +    // 
    +    // Luckily we have incremental SSA algorithm which can perform quick
    +    // updates of already computed basis in O(K*Width^2) ops, where K
+    // is the number of singular vectors extracted. Usually it is orders of
    +    // magnitude faster than full update of the basis.
    +    //
    +    // In this example we start from some initial dataset x0. Then we
    +    // start appending elements one by one to the end of the last sequence.
    +    //
    +    // NOTE: direct algorithm also supports incremental updates, but
    +    //       with O(Width^3) cost. Typically K<<Width, so specialized
    +    //       incremental algorithm is still faster.
    +    //
    +    ssamodel s1;
    +    real_2d_array a1;
    +    real_1d_array sv1;
    +    ae_int_t w;
    +    ae_int_t k;
    +    real_1d_array x0 = "[0.009,0.976,1.999,2.984,3.977,5.002]";
    +    ssacreate(s1);
    +    ssasetwindow(s1, 3);
    +    ssaaddsequence(s1, x0);
    +
    +    // set algorithm to the real-time version of top-K, K=2
    +    ssasetalgotopkrealtime(s1, 2);
    +
+    // One more interesting feature of the incremental algorithm is the "power-up" cycle:
+    // even with the incremental algorithm, initial basis calculation costs O(N*Width^2) ops.
+    // If such startup cost is too high for your real-time app, you may divide the
+    // initial basis calculation across several model updates. It results in better
+    // latency at the price of somewhat lower precision during the first few updates.
    +    ssasetpoweruplength(s1, 3);
    +
+    // Now, after we have prepared everything, we start to add incoming points one by one;
+    // in real life, of course, we would perform some work between subsequent updates
    +    // (analyze something, predict, and so on).
    +    //
    +    // After each append we perform one iteration of the real-time solver. Usually
    +    // one iteration is more than enough to update basis. If you have REALLY tight
+    // performance constraints, you may specify a fractional number of iterations,
+    // which means that an iteration is performed with the corresponding probability.
    +    double updateits = 1.0;
    +    ssaappendpointandupdate(s1, 5.951, updateits);
    +    ssagetbasis(s1, a1, sv1, w, k);
    +
    +    ssaappendpointandupdate(s1, 7.074, updateits);
    +    ssagetbasis(s1, a1, sv1, w, k);
    +
    +    ssaappendpointandupdate(s1, 7.925, updateits);
    +    ssagetbasis(s1, a1, sv1, w, k);
    +
    +    ssaappendpointandupdate(s1, 8.992, updateits);
    +    ssagetbasis(s1, a1, sv1, w, k);
    +
    +    ssaappendpointandupdate(s1, 9.942, updateits);
    +    ssagetbasis(s1, a1, sv1, w, k);
    +
    +    ssaappendpointandupdate(s1, 11.051, updateits);
    +    ssagetbasis(s1, a1, sv1, w, k);
    +
    +    ssaappendpointandupdate(s1, 11.965, updateits);
    +    ssagetbasis(s1, a1, sv1, w, k);
    +
    +    ssaappendpointandupdate(s1, 13.047, updateits);
    +    ssagetbasis(s1, a1, sv1, w, k);
    +
    +    ssaappendpointandupdate(s1, 13.970, updateits);
    +    ssagetbasis(s1, a1, sv1, w, k);
    +
    +    // Ok, we have our basis in a1[] and singular values at sv1[].
    +    // But is it good enough? Let's print it.
    +    printf("%s\n", a1.tostring(3).c_str()); // EXPECTED: [[0.510607,0.753611],[0.575201,0.058445],[0.639081,-0.654717]]
    +
    +    // Ok, two vectors with 3 components each.
+    // But how can we tell whether it is really a good basis?
    +    // Let's compare it with direct SSA algorithm on the entire sequence.
    +    ssamodel s2;
    +    real_2d_array a2;
    +    real_1d_array sv2;
    +    real_1d_array x2 = "[0.009,0.976,1.999,2.984,3.977,5.002,5.951,7.074,7.925,8.992,9.942,11.051,11.965,13.047,13.970]";
    +    ssacreate(s2);
    +    ssasetwindow(s2, 3);
    +    ssaaddsequence(s2, x2);
    +    ssasetalgotopkdirect(s2, 2);
    +    ssagetbasis(s2, a2, sv2, w, k);
    +
+    // It is exactly the same as the one calculated with the incremental approach!
    +    printf("%s\n", a2.tostring(3).c_str()); // EXPECTED: [[0.510607,0.753611],[0.575201,0.058445],[0.639081,-0.654717]]
         return 0;
     }
     
    @@ -45177,7 +57514,8 @@
         double median,
         double& bothtails,
         double& lefttail,
    -    double& righttail);
    +    double& righttail,
    +    const xparams _params = alglib::xdefault);
     
     
    @@ -45207,7 +57545,10 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::invstudenttdistribution(ae_int_t k, double p); +
    double alglib::invstudenttdistribution( + ae_int_t k, + double p, + const xparams _params = alglib::xdefault);
    @@ -45253,7 +57594,10 @@ Cephes Math Library Release 2.8: June, 2000 Copyright 1984, 1987, 1995, 2000 by Stephen L. Moshier *************************************************************************/ -
    double alglib::studenttdistribution(ae_int_t k, double t); +
    double alglib::studenttdistribution( + ae_int_t k, + double t, + const xparams _params = alglib::xdefault);
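
For orientation, a small sketch of how these two routines are typically combined to turn a t statistic into a two-sided p-value and a critical value. The header name specialfunctions.h is assumed here (the unit these Cephes-derived distributions are packaged in); the numbers are arbitrary.

#include "stdafx.h"
#include <stdio.h>
#include <math.h>
#include "specialfunctions.h"

using namespace alglib;

int main()
{
    ae_int_t df = 12;      // degrees of freedom
    double t = 2.30;       // observed t statistic

    // studenttdistribution(k, t) returns the CDF P(T <= t) of Student's t
    double p_two_sided = 2.0*(1.0 - studenttdistribution(df, fabs(t)));
    printf("two-sided p-value: %.4f\n", p_two_sided);

    // invstudenttdistribution(k, p) inverts the CDF; the 97.5% quantile is
    // the two-sided critical value at alpha=0.05
    double tcrit = invstudenttdistribution(df, 0.975);
    printf("critical value: %.4f\n", tcrit);
    return 0;
}
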
    @@ -45315,7 +57659,8 @@ double mean, double& bothtails, double& lefttail, - double& righttail); + double& righttail, + const xparams _params = alglib::xdefault); @@ -45368,7 +57713,8 @@ ae_int_t m, double& bothtails, double& lefttail, - double& righttail); + double& righttail, + const xparams _params = alglib::xdefault); @@ -45423,7 +57769,8 @@ ae_int_t m, double& bothtails, double& lefttail, - double& righttail); + double& righttail, + const xparams _params = alglib::xdefault); @@ -45438,22 +57785,13 @@
    /************************************************************************* Singular value decomposition of a rectangular matrix. -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. - ! - ! Multithreaded acceleration is only partially supported (some parts are - ! optimized, but most - are not). + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -45478,7 +57816,7 @@ VTNeeded - 0, 1 or 2. See the description of the parameter VT. AdditionalMemory - If the parameter: - * equals 0, the algorithm doesn’t use additional + * equals 0, the algorithm doesn't use additional memory (lower requirements, lower performance). * equals 1, the algorithm uses additional memory of size min(M,N)*min(M,N) of real numbers. @@ -45497,7 +57835,7 @@ within [0..M-1, 0..Min(M,N)-1]. if UNeeded=2, U contains matrix U wholly. Array whose indexes range within [0..M-1, 0..M-1]. - VT - if VTNeeded=0, VT isn’t changed, the right singular vectors + VT - if VTNeeded=0, VT isn't changed, the right singular vectors are not calculated. if VTNeeded=1, VT contains right singular vectors (first min(M,N) rows of matrix V^T). Array whose indexes range @@ -45517,17 +57855,8 @@ ae_int_t additionalmemory, real_1d_array& w, real_2d_array& u, - real_2d_array& vt); -bool alglib::smp_rmatrixsvd( - real_2d_array a, - ae_int_t m, - ae_int_t n, - ae_int_t uneeded, - ae_int_t vtneeded, - ae_int_t additionalmemory, - real_1d_array& w, - real_2d_array& u, - real_2d_array& vt); + real_2d_array& vt, + const xparams _params = alglib::xdefault);
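
A minimal sketch of calling the updated interface (the smp_ wrapper is gone; the trailing xparams argument can simply be omitted). The 2x2 matrix is arbitrary; UNeeded/VTNeeded/AdditionalMemory follow the parameter description above, and the linalg.h package header is assumed.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    // UNeeded=1, VTNeeded=1: first min(M,N) left/right singular vectors;
    // AdditionalMemory=2 allows the fastest code path.
    real_2d_array a = "[[1,2],[3,4]]";
    real_1d_array w;
    real_2d_array u, vt;
    if( !rmatrixsvd(a, 2, 2, 1, 1, 2, w, u, vt) )
    {
        printf("SVD did not converge\n");
        return 1;
    }
    printf("singular values: %s\n", w.tostring(4).c_str());
    return 0;
}
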
    @@ -45537,6 +57866,7 @@ hpdmatrixcholesky
    rmatrixlu
    sparsecholeskyskyline
    +sparselu
    spdmatrixcholesky
    spdmatrixcholeskyupdateadd1
    spdmatrixcholeskyupdateadd1buf
    @@ -45556,42 +57886,14 @@ * P = P0*P1*...*PK, K=min(M,N)-1, Pi - permutation matrix for I and Pivots[I] -This is cache-oblivous implementation of LU decomposition. It is optimized -for square matrices. As for rectangular matrices: -* best case - M>>N -* worst case - N>>M, small M, large N, matrix does not fit in CPU cache - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that LU decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -45618,12 +57920,8 @@ complex_2d_array& a, ae_int_t m, ae_int_t n, - integer_1d_array& pivots); -void alglib::smp_cmatrixlu( - complex_2d_array& a, - ae_int_t m, - ae_int_t n, - integer_1d_array& pivots); + integer_1d_array& pivots, + const xparams _params = alglib::xdefault); @@ -45633,38 +57931,16 @@ The algorithm computes Cholesky decomposition of a Hermitian positive- definite matrix. The result of an algorithm is a representation of A as -A=U'*U or A=L*L' (here X' detones conj(X^T)). - -COMMERCIAL EDITION OF ALGLIB: +A=U'*U or A=L*L' (here X' denotes conj(X^T)). - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! 
* about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that Cholesky decomposition is harder - ! to parallelize than, say, matrix-matrix product - this algorithm has - ! several synchronization points which can not be avoided. However, - ! parallelism starts to be profitable starting from N=500. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -45689,17 +57965,14 @@ in such case. -- ALGLIB routine -- - 15.12.2009 + 15.12.2009-22.01.2018 Bochkanov Sergey *************************************************************************/
    bool alglib::hpdmatrixcholesky( complex_2d_array& a, ae_int_t n, - bool isupper); -bool alglib::smp_hpdmatrixcholesky( - complex_2d_array& a, - ae_int_t n, - bool isupper); + bool isupper, + const xparams _params = alglib::xdefault);
    @@ -45713,42 +57986,14 @@ * P = P0*P1*...*PK, K=min(M,N)-1, Pi - permutation matrix for I and Pivots[I] -This is cache-oblivous implementation of LU decomposition. -It is optimized for square matrices. As for rectangular matrices: -* best case - M>>N -* worst case - N>>M, small M, large N, matrix does not fit in CPU cache - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that LU decomposition is harder to - ! parallelize than, say, matrix-matrix product - this algorithm has - ! many internal synchronization points which can not be avoided. However - ! parallelism starts to be profitable starting from N=1024, achieving - ! near-linear speedup for N=4096 or higher. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -45775,12 +58020,8 @@ real_2d_array& a, ae_int_t m, ae_int_t n, - integer_1d_array& pivots); -void alglib::smp_rmatrixlu( - real_2d_array& a, - ae_int_t m, - ae_int_t n, - integer_1d_array& pivots); + integer_1d_array& pivots, + const xparams _params = alglib::xdefault); @@ -45832,7 +58073,62 @@
    bool alglib::sparsecholeskyskyline( sparsematrix a, ae_int_t n, - bool isupper); + bool isupper, + const xparams _params = alglib::xdefault); + +
    + +
    +
/************************************************************************* +Sparse LU decomposition with column pivoting for sparsity and row pivoting +for stability. Input must be square sparse matrix stored in CRS format. + +The algorithm computes LU decomposition of a general square matrix +(rectangular ones are not supported). The result of an algorithm is a +representation of A as A = P*L*U*Q, where: +* L is lower unitriangular matrix +* U is upper triangular matrix +* P = P0*P1*...*PK, K=N-1, Pi - permutation matrix for I and P[I] +* Q = QK*...*Q1*Q0, K=N-1, Qi - permutation matrix for I and Q[I] + +This function pivots columns for higher sparsity, and then pivots rows for +stability (larger element at the diagonal). + +INPUT PARAMETERS: + A - sparse NxN matrix in CRS format. An exception is generated + if matrix is non-CRS or non-square. + PivotType- pivoting strategy: + * 0 for best pivoting available (2 in current version) + * 1 for row-only pivoting (NOT RECOMMENDED) + * 2 for complete pivoting which produces most sparse outputs + +OUTPUT PARAMETERS: + A - the result of factorization, matrices L and U stored in + compact form using CRS sparse storage format: + * lower unitriangular L is stored strictly under main diagonal + * upper triangular U is stored ON and ABOVE main diagonal + P - row permutation matrix in compact form, array[N] + Q - col permutation matrix in compact form, array[N] + +This function always succeeds, i.e. it ALWAYS returns valid factorization, +but for your convenience it also returns boolean value which helps to +detect symbolically degenerate matrices: +* function returns TRUE, if the matrix was factorized AND symbolically + non-degenerate +* function returns FALSE, if the matrix was factorized but U has strictly + zero elements at the diagonal (the factorization is returned anyway). + + + -- ALGLIB routine -- + 03.09.2018 + Bochkanov Sergey +*************************************************************************/ +
    bool alglib::sparselu( + sparsematrix a, + ae_int_t pivottype, + integer_1d_array& p, + integer_1d_array& q, + const xparams _params = alglib::xdefault);
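
A small usage sketch for the new sparselu() routine. The 3x3 matrix is made up, and the sparsecreate()/sparseset()/sparseconverttocrs() helpers are the standard ALGLIB sparse-matrix builders assumed to be available from the same linalg package.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    // Build a small sparse matrix in hash-table format, then convert it to
    // CRS - sparselu() accepts square CRS-format matrices only.
    sparsematrix a;
    sparsecreate(3, 3, a);
    sparseset(a, 0, 0, 4.0);
    sparseset(a, 0, 1, 1.0);
    sparseset(a, 1, 1, 3.0);
    sparseset(a, 1, 2, 2.0);
    sparseset(a, 2, 0, 1.0);
    sparseset(a, 2, 2, 5.0);
    sparseconverttocrs(a);

    // PivotType=0 selects the best pivoting available (complete pivoting in
    // the current version); P and Q receive the compact row/column permutations.
    integer_1d_array p, q;
    bool ok = sparselu(a, 0, p, q);
    printf("symbolically non-degenerate: %s\n", ok ? "true" : "false");
    return 0;
}
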
    @@ -45844,36 +58140,14 @@ definite matrix. The result of an algorithm is a representation of A as A=U^T*U or A=L*L^T -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Say, on SSE2-capable CPU with N=1024, HPC ALGLIB will be: - ! * about 2-3x faster than ALGLIB for C++ without MKL - ! * about 7-10x faster than "pure C#" edition of ALGLIB - ! Difference in performance will be more striking on newer CPU's with - ! support for newer SIMD instructions. Generally, MKL accelerates any - ! problem whose size is at least 128, with best efficiency achieved for - ! N's larger than 512. - ! - ! Commercial edition of ALGLIB also supports multithreaded acceleration - ! of this function. We should note that Cholesky decomposition is harder - ! to parallelize than, say, matrix-matrix product - this algorithm has - ! several synchronization points which can not be avoided. However, - ! parallelism starts to be profitable starting from N=500. - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- @@ -45904,11 +58178,8 @@
    bool alglib::spdmatrixcholesky( real_2d_array& a, ae_int_t n, - bool isupper); -bool alglib::smp_spdmatrixcholesky( - real_2d_array& a, - ae_int_t n, - bool isupper); + bool isupper, + const xparams _params = alglib::xdefault);
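
And a matching sketch for the dense SPD Cholesky with the new single prototype (again, no smp_ variant). The 2x2 matrix is a small positive-definite example; with IsUpper=true only the upper triangle is referenced and overwritten.

#include "stdafx.h"
#include <stdio.h>
#include "linalg.h"

using namespace alglib;

int main()
{
    real_2d_array a = "[[4,2],[2,3]]";
    if( !spdmatrixcholesky(a, 2, true) )
    {
        printf("matrix is not positive definite\n");
        return 1;
    }
    // The upper triangle of A now holds U with A = U^T*U;
    // the lower triangle is left untouched.
    printf("%s\n", a.tostring(4).c_str());
    return 0;
}
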
    @@ -45954,7 +58225,8 @@ real_2d_array& a, ae_int_t n, bool isupper, - real_1d_array u); + real_1d_array u, + const xparams _params = alglib::xdefault); @@ -45993,7 +58265,8 @@ ae_int_t n, bool isupper, real_1d_array u, - real_1d_array& bufr); + real_1d_array& bufr, + const xparams _params = alglib::xdefault); @@ -46060,7 +58333,8 @@ real_2d_array& a, ae_int_t n, bool isupper, - boolean_1d_array fix); + boolean_1d_array fix, + const xparams _params = alglib::xdefault); @@ -46099,7 +58373,8 @@ ae_int_t n, bool isupper, boolean_1d_array fix, - real_1d_array& bufr); + real_1d_array& bufr, + const xparams _params = alglib::xdefault); @@ -46155,7 +58430,8 @@
    void alglib::hyperbolicsinecosineintegrals( double x, double& shi, - double& chi); + double& chi, + const xparams _params = alglib::xdefault);
    @@ -46199,7 +58475,11 @@ Cephes Math Library Release 2.1: January, 1989 Copyright 1984, 1987, 1989 by Stephen L. Moshier *************************************************************************/ -
    void alglib::sinecosineintegrals(double x, double& si, double& ci); +
    void alglib::sinecosineintegrals( + double x, + double& si, + double& ci, + const xparams _params = alglib::xdefault);
    @@ -46255,7 +58535,8 @@ ae_int_t m, double& bothtails, double& lefttail, - double& righttail); + double& righttail, + const xparams _params = alglib::xdefault); @@ -46300,7 +58581,8 @@ double variance, double& bothtails, double& lefttail, - double& righttail); + double& righttail, + const xparams _params = alglib::xdefault); @@ -46367,7 +58649,8 @@ double e, double& bothtails, double& lefttail, - double& righttail); + double& righttail, + const xparams _params = alglib::xdefault); @@ -46437,7 +58720,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugb1appendcopy(boolean_1d_array& a); +
    void alglib::xdebugb1appendcopy( + boolean_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46451,7 +58736,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::xdebugb1count(boolean_1d_array a); +
    ae_int_t alglib::xdebugb1count( + boolean_1d_array a, + const xparams _params = alglib::xdefault);
    @@ -46466,7 +58753,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugb1not(boolean_1d_array& a); +
    void alglib::xdebugb1not( + boolean_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46481,7 +58770,10 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugb1outeven(ae_int_t n, boolean_1d_array& a); +
    void alglib::xdebugb1outeven( + ae_int_t n, + boolean_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46495,7 +58787,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::xdebugb2count(boolean_2d_array a); +
    ae_int_t alglib::xdebugb2count( + boolean_2d_array a, + const xparams _params = alglib::xdefault);
    @@ -46510,7 +58804,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugb2not(boolean_2d_array& a); +
    void alglib::xdebugb2not( + boolean_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46525,7 +58821,11 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugb2outsin(ae_int_t m, ae_int_t n, boolean_2d_array& a); +
    void alglib::xdebugb2outsin( + ae_int_t m, + ae_int_t n, + boolean_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46540,7 +58840,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugb2transpose(boolean_2d_array& a); +
    void alglib::xdebugb2transpose( + boolean_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46555,7 +58857,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugc1appendcopy(complex_1d_array& a); +
    void alglib::xdebugc1appendcopy( + complex_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46570,7 +58874,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugc1neg(complex_1d_array& a); +
    void alglib::xdebugc1neg( + complex_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46587,7 +58893,10 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugc1outeven(ae_int_t n, complex_1d_array& a); +
    void alglib::xdebugc1outeven( + ae_int_t n, + complex_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46601,7 +58910,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    alglib::complex alglib::xdebugc1sum(complex_1d_array a); +
    alglib::complex alglib::xdebugc1sum( + complex_1d_array a, + const xparams _params = alglib::xdefault);
    @@ -46616,7 +58927,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugc2neg(complex_2d_array& a); +
    void alglib::xdebugc2neg( + complex_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46634,7 +58947,8 @@
    void alglib::xdebugc2outsincos( ae_int_t m, ae_int_t n, - complex_2d_array& a); + complex_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46648,7 +58962,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    alglib::complex alglib::xdebugc2sum(complex_2d_array a); +
    alglib::complex alglib::xdebugc2sum( + complex_2d_array a, + const xparams _params = alglib::xdefault);
    @@ -46663,7 +58979,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugc2transpose(complex_2d_array& a); +
    void alglib::xdebugc2transpose( + complex_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46678,7 +58996,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugi1appendcopy(integer_1d_array& a); +
    void alglib::xdebugi1appendcopy( + integer_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46693,7 +59013,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugi1neg(integer_1d_array& a); +
    void alglib::xdebugi1neg( + integer_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46710,7 +59032,10 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugi1outeven(ae_int_t n, integer_1d_array& a); +
    void alglib::xdebugi1outeven( + ae_int_t n, + integer_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46724,7 +59049,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::xdebugi1sum(integer_1d_array a); +
    ae_int_t alglib::xdebugi1sum( + integer_1d_array a, + const xparams _params = alglib::xdefault);
    @@ -46739,7 +59066,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugi2neg(integer_2d_array& a); +
    void alglib::xdebugi2neg( + integer_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46754,7 +59083,11 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugi2outsin(ae_int_t m, ae_int_t n, integer_2d_array& a); +
    void alglib::xdebugi2outsin( + ae_int_t m, + ae_int_t n, + integer_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46768,7 +59101,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    ae_int_t alglib::xdebugi2sum(integer_2d_array a); +
    ae_int_t alglib::xdebugi2sum( + integer_2d_array a, + const xparams _params = alglib::xdefault);
    @@ -46783,7 +59118,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugi2transpose(integer_2d_array& a); +
    void alglib::xdebugi2transpose( + integer_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46799,7 +59136,9 @@ -- ALGLIB -- Copyright 27.05.2014 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebuginitrecord1(xdebugrecord1& rec1); +
    void alglib::xdebuginitrecord1( + xdebugrecord1& rec1, + const xparams _params = alglib::xdefault);
    @@ -46818,7 +59157,8 @@ ae_int_t n, real_2d_array a, real_2d_array b, - boolean_2d_array c); + boolean_2d_array c, + const xparams _params = alglib::xdefault); @@ -46833,7 +59173,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugr1appendcopy(real_1d_array& a); +
    void alglib::xdebugr1appendcopy( + real_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46848,7 +59190,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugr1neg(real_1d_array& a); +
    void alglib::xdebugr1neg( + real_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46865,7 +59209,10 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugr1outeven(ae_int_t n, real_1d_array& a); +
    void alglib::xdebugr1outeven( + ae_int_t n, + real_1d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46879,7 +59226,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    double alglib::xdebugr1sum(real_1d_array a); +
    double alglib::xdebugr1sum( + real_1d_array a, + const xparams _params = alglib::xdefault);
    @@ -46894,7 +59243,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugr2neg(real_2d_array& a); +
    void alglib::xdebugr2neg( + real_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46909,7 +59260,11 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugr2outsin(ae_int_t m, ae_int_t n, real_2d_array& a); +
    void alglib::xdebugr2outsin( + ae_int_t m, + ae_int_t n, + real_2d_array& a, + const xparams _params = alglib::xdefault);
    @@ -46923,7 +59278,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    double alglib::xdebugr2sum(real_2d_array a); +
    double alglib::xdebugr2sum( + real_2d_array a, + const xparams _params = alglib::xdefault);
    @@ -46938,7 +59295,9 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -
    void alglib::xdebugr2transpose(real_2d_array& a); +
    void alglib::xdebugr2transpose( + real_2d_array& a, + const xparams _params = alglib::xdefault);
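The hunks above show the pattern applied across the whole 3.16 C++ interface: every xdebug wrapper gains a trailing optional argument, const xparams _params = alglib::xdefault. Because the parameter is defaulted, existing call sites keep compiling unchanged. A minimal call-site sketch follows; the header name alglibmisc.h is an assumption about where these self-test helpers are declared in a typical source build, everything else is taken from the signatures above.

    // Call-site sketch only; alglibmisc.h is assumed to declare the xdebug helpers.
    #include "alglibmisc.h"
    #include <cstdio>

    int main()
    {
        alglib::real_1d_array a = "[1,2,3]";

        alglib::xdebugr1neg(a);                    // pre-3.16 call style still compiles
        alglib::xdebugr1neg(a, alglib::xdefault);  // new trailing xparams argument, passed explicitly

        std::printf("%s\n", a.tostring(3).c_str());  // two negations cancel: [1.000,2.000,3.000]
        return 0;
    }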
    diff -Nru alglib-3.10.0/src/alglibinternal.cpp alglib-3.16.0/src/alglibinternal.cpp --- alglib-3.10.0/src/alglibinternal.cpp 2015-08-19 12:24:21.000000000 +0000 +++ alglib-3.16.0/src/alglibinternal.cpp 2019-12-19 10:28:27.000000000 +0000 @@ -1,5 +1,5 @@ /************************************************************************* -ALGLIB 3.10.0 (source code generated 2015-08-19) +ALGLIB 3.16.0 (source code generated 2019-12-19) Copyright (c) Sergey Bochkanov (ALGLIB project). >>> SOURCE LICENSE >>> @@ -17,17 +17,20 @@ http://www.fsf.org/licensing/licenses >>> END OF LICENSE >>> *************************************************************************/ +#ifdef _MSC_VER +#define _CRT_SECURE_NO_WARNINGS +#endif #include "stdafx.h" #include "alglibinternal.h" // disable some irrelevant warnings -#if (AE_COMPILER==AE_MSVC) +#if (AE_COMPILER==AE_MSVC) && !defined(AE_ALL_WARNINGS) #pragma warning(disable:4100) #pragma warning(disable:4127) +#pragma warning(disable:4611) #pragma warning(disable:4702) #pragma warning(disable:4996) #endif -using namespace std; ///////////////////////////////////////////////////////////////////////// // @@ -47,10 +50,15 @@ ///////////////////////////////////////////////////////////////////////// namespace alglib_impl { +#if defined(AE_COMPILE_SCODES) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_APSERV) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_TSORT) || !defined(AE_PARTIAL_BUILD) static void tsort_tagsortfastirec(/* Real */ ae_vector* a, /* Integer */ ae_vector* b, /* Real */ ae_vector* bufa, @@ -72,58 +80,28 @@ ae_state *_state); +#endif +#if defined(AE_COMPILE_ABLASMKL) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_ABLASF) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_CREFLECTIONS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_ROTATIONS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_TRLINSOLVE) || !defined(AE_PARTIAL_BUILD) - - - - - - - - -static void hsschur_internalauxschur(ae_bool wantt, - ae_bool wantz, - ae_int_t n, - ae_int_t ilo, - ae_int_t ihi, - /* Real */ ae_matrix* h, - /* Real */ ae_vector* wr, - /* Real */ ae_vector* wi, - ae_int_t iloz, - ae_int_t ihiz, - /* Real */ ae_matrix* z, - /* Real */ ae_vector* work, - /* Real */ ae_vector* workv3, - /* Real */ ae_vector* workc1, - /* Real */ ae_vector* works1, - ae_int_t* info, - ae_state *_state); -static void hsschur_aux2x2schur(double* a, - double* b, - double* c, - double* d, - double* rt1r, - double* rt1i, - double* rt2r, - double* rt2i, - double* cs, - double* sn, - ae_state *_state); -static double hsschur_extschursign(double a, double b, ae_state *_state); -static ae_int_t hsschur_extschursigntoone(double b, ae_state *_state); - - - - +#endif +#if defined(AE_COMPILE_SAFESOLVE) || !defined(AE_PARTIAL_BUILD) static ae_bool safesolve_cbasicsolveandupdate(ae_complex alpha, ae_complex beta, double lnmax, @@ -134,25 +112,20 @@ ae_state *_state); -static ae_bool hpccores_hpcpreparechunkedgradientx(/* Real */ ae_vector* weights, - ae_int_t wcount, - /* Real */ ae_vector* hpcbuf, - ae_state *_state); -static ae_bool hpccores_hpcfinalizechunkedgradientx(/* Real */ ae_vector* buf, - ae_int_t wcount, - /* Real */ ae_vector* grad, - ae_state *_state); +#endif +#if defined(AE_COMPILE_HBLAS) || !defined(AE_PARTIAL_BUILD) -static void xblas_xsum(/* Real */ ae_vector* w, - double mx, - ae_int_t n, - double* r, - double* rerr, - ae_state *_state); -static double xblas_xfastpow(double r, ae_int_t n, ae_state *_state); 
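Note the structural change running through alglibinternal.cpp from here on: each internal unit is wrapped in guards of the form #if defined(AE_COMPILE_<UNIT>) || !defined(AE_PARTIAL_BUILD), so defining AE_PARTIAL_BUILD plus the AE_COMPILE_* macros for the units you need compiles only those units. A stripped-down sketch of that pattern; the two demo functions and compiler invocations are illustrative, not part of ALGLIB.

    /* units_sketch.cpp -- hypothetical file mirroring the per-unit guard pattern.
     * Full build:     g++ -c units_sketch.cpp
     * Partial build:  g++ -c -DAE_PARTIAL_BUILD -DAE_COMPILE_APSERV units_sketch.cpp
     * (the second form keeps apserv_demo() and drops ftbase_demo())
     */
    #if defined(AE_COMPILE_APSERV) || !defined(AE_PARTIAL_BUILD)
    int apserv_demo(void)  { return 1; }   /* kept: unit explicitly requested */
    #endif

    #if defined(AE_COMPILE_FTBASE) || !defined(AE_PARTIAL_BUILD)
    int ftbase_demo(void)  { return 2; }   /* dropped under the partial build above */
    #endif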
+#endif +#if defined(AE_COMPILE_SBLAS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_BLAS) || !defined(AE_PARTIAL_BUILD) + + +#endif +#if defined(AE_COMPILE_LINMIN) || !defined(AE_PARTIAL_BUILD) static double linmin_ftol = 0.001; static double linmin_xtol = 100*ae_machineepsilon; static ae_int_t linmin_maxfev = 20; @@ -175,6 +148,35 @@ ae_state *_state); +#endif +#if defined(AE_COMPILE_XBLAS) || !defined(AE_PARTIAL_BUILD) +static void xblas_xsum(/* Real */ ae_vector* w, + double mx, + ae_int_t n, + double* r, + double* rerr, + ae_state *_state); +static double xblas_xfastpow(double r, ae_int_t n, ae_state *_state); + + +#endif +#if defined(AE_COMPILE_BASICSTATOPS) || !defined(AE_PARTIAL_BUILD) + + +#endif +#if defined(AE_COMPILE_HPCCORES) || !defined(AE_PARTIAL_BUILD) +static ae_bool hpccores_hpcpreparechunkedgradientx(/* Real */ ae_vector* weights, + ae_int_t wcount, + /* Real */ ae_vector* hpcbuf, + ae_state *_state); +static ae_bool hpccores_hpcfinalizechunkedgradientx(/* Real */ ae_vector* buf, + ae_int_t wcount, + /* Real */ ae_vector* grad, + ae_state *_state); + + +#endif +#if defined(AE_COMPILE_NTHEORY) || !defined(AE_PARTIAL_BUILD) static ae_bool ntheory_isprime(ae_int_t n, ae_state *_state); static ae_int_t ntheory_modmul(ae_int_t a, ae_int_t b, @@ -186,6 +188,8 @@ ae_state *_state); +#endif +#if defined(AE_COMPILE_FTBASE) || !defined(AE_PARTIAL_BUILD) static ae_int_t ftbase_coltype = 0; static ae_int_t ftbase_coloperandscnt = 1; static ae_int_t ftbase_coloperandsize = 2; @@ -361,37 +365,103 @@ ae_state *_state); +#endif +#if defined(AE_COMPILE_NEARUNITYUNIT) || !defined(AE_PARTIAL_BUILD) + + +#endif +#if defined(AE_COMPILE_ALGLIBBASICS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_SCODES) || !defined(AE_PARTIAL_BUILD) +ae_int_t getrdfserializationcode(ae_state *_state) +{ + ae_int_t result; -/************************************************************************* -This function is used to set error flags during unit tests. When COND -parameter is True, FLAG variable is set to True. When COND is False, -FLAG is unchanged. + result = 1; + return result; +} -The purpose of this function is to have single point where failures of -unit tests can be detected. -This function returns value of COND. 
-*************************************************************************/ -ae_bool seterrorflag(ae_bool* flag, ae_bool cond, ae_state *_state) +ae_int_t getkdtreeserializationcode(ae_state *_state) { - ae_bool result; + ae_int_t result; - if( cond ) - { - *flag = ae_true; - } - result = cond; + result = 2; + return result; +} + + +ae_int_t getmlpserializationcode(ae_state *_state) +{ + ae_int_t result; + + + result = 3; + return result; +} + + +ae_int_t getmlpeserializationcode(ae_state *_state) +{ + ae_int_t result; + + + result = 4; + return result; +} + + +ae_int_t getrbfserializationcode(ae_state *_state) +{ + ae_int_t result; + + + result = 5; return result; } +ae_int_t getspline2dserializationcode(ae_state *_state) +{ + ae_int_t result; + + + result = 6; + return result; +} + + +ae_int_t getidwserializationcode(ae_state *_state) +{ + ae_int_t result; + + + result = 7; + return result; +} + + +ae_int_t getknnserializationcode(ae_state *_state) +{ + ae_int_t result; + + + result = 108; + return result; +} + + +#endif +#if defined(AE_COMPILE_APSERV) || !defined(AE_PARTIAL_BUILD) + + /************************************************************************* Internally calls SetErrorFlag() with condition: @@ -403,17 +473,32 @@ This function returns value of COND. *************************************************************************/ -ae_bool seterrorflagdiff(ae_bool* flag, +void seterrorflagdiff(ae_bool* flag, double val, double refval, double tol, double s, ae_state *_state) { + + + ae_set_error_flag(flag, ae_fp_greater(ae_fabs(val-refval, _state),tol*ae_maxreal(ae_fabs(refval, _state), s, _state)), __FILE__, __LINE__, "apserv.ap:162"); +} + + +/************************************************************************* +The function always returns False. +It may be used sometimes to prevent spurious warnings. + + -- ALGLIB -- + Copyright 17.09.2012 by Bochkanov Sergey +*************************************************************************/ +ae_bool alwaysfalse(ae_state *_state) +{ ae_bool result; - result = seterrorflag(flag, ae_fp_greater(ae_fabs(val-refval, _state),tol*ae_maxreal(ae_fabs(refval, _state), s, _state)), _state); + result = ae_false; return result; } @@ -471,6 +556,28 @@ /************************************************************************* +The function performs zero-coalescing on integer value. + +NOTE: no check is performed for B<>0 + + -- ALGLIB -- + Copyright 18.05.2015 by Bochkanov Sergey +*************************************************************************/ +ae_int_t coalescei(ae_int_t a, ae_int_t b, ae_state *_state) +{ + ae_int_t result; + + + result = a; + if( a==0 ) + { + result = b; + } + return result; +} + + +/************************************************************************* The function convert integer value to real value. -- ALGLIB -- @@ -506,6 +613,24 @@ /************************************************************************* This function compares two numbers for approximate equality, with tolerance +to errors as large as tol. + + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +ae_bool approxequal(double a, double b, double tol, ae_state *_state) +{ + ae_bool result; + + + result = ae_fp_less_eq(ae_fabs(a-b, _state),tol); + return result; +} + + +/************************************************************************* +This function compares two numbers for approximate equality, with tolerance to errors as large as max(|a|,|b|)*tol. 
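Among the apserv additions above are two small helpers worth calling out: coalescei(), which zero-coalesces an integer (returns B when A is zero), and approxequal(), an absolute-tolerance comparison |a-b| <= tol, as opposed to the relative-tolerance approxequalrel() that follows it. A standalone sketch of their semantics, not the ALGLIB implementation itself:

    #include <cassert>
    #include <cmath>

    // Semantics mirrored from the patch; plain int/double instead of ae_int_t/ae_state.
    int  coalescei(int a, int b)                     { return a == 0 ? b : a; }
    bool approxequal(double a, double b, double tol) { return std::fabs(a - b) <= tol; }

    int main()
    {
        assert(coalescei(0, 7) == 7);   // zero falls through to the alternative
        assert(coalescei(3, 7) == 3);   // non-zero value wins
        assert( approxequal(1.0, 1.0 + 1e-12, 1e-9));
        assert(!approxequal(1.0, 1.1, 1e-9));
        return 0;
    }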
@@ -790,6 +915,28 @@ /************************************************************************* +Resizes X and fills by zeros + + -- ALGLIB -- + Copyright 20.03.2009 by Bochkanov Sergey +*************************************************************************/ +void setlengthzero(/* Real */ ae_vector* x, + ae_int_t n, + ae_state *_state) +{ + ae_int_t i; + + + ae_assert(n>=0, "SetLengthZero: N<0", _state); + ae_vector_set_length(x, n, _state); + for(i=0; i<=n-1; i++) + { + x->ptr.p_double[i] = (double)(0); + } +} + + +/************************************************************************* If Length(X)rows; - n2 = x->cols; - ae_swap_matrices(x, &oldx); - ae_matrix_set_length(x, m, n, _state); - for(i=0; i<=m-1; i++) + if( m>0&&n>0 ) { - for(j=0; j<=n-1; j++) + if( x->rowscolsptr.pp_double[i][j] = oldx.ptr.pp_double[i][j]; - } - else - { - x->ptr.pp_double[i][j] = 0.0; - } + ae_matrix_set_length(x, m, n, _state); } } - ae_frame_leave(_state); } /************************************************************************* -Resizes X and: -* preserves old contents of X -* fills new elements by zeros +Grows X, i.e. changes its size in such a way that: +a) contents is preserved +b) new size is at least N +c) new size can be larger than N, so subsequent grow() calls can return + without reallocation -- ALGLIB -- Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -void imatrixresize(/* Integer */ ae_matrix* x, - ae_int_t m, +void bvectorgrowto(/* Boolean */ ae_vector* x, ae_int_t n, ae_state *_state) { ae_frame _frame_block; - ae_matrix oldx; + ae_vector oldx; ae_int_t i; - ae_int_t j; - ae_int_t m2; ae_int_t n2; ae_frame_make(_state, &_frame_block); - ae_matrix_init(&oldx, 0, 0, DT_INT, _state); + memset(&oldx, 0, sizeof(oldx)); + ae_vector_init(&oldx, 0, DT_BOOL, _state, ae_true); - m2 = x->rows; - n2 = x->cols; - ae_swap_matrices(x, &oldx); - ae_matrix_set_length(x, m, n, _state); - for(i=0; i<=m-1; i++) + + /* + * Enough place + */ + if( x->cnt>=n ) { - for(j=0; j<=n-1; j++) + ae_frame_leave(_state); + return; + } + + /* + * Choose new size + */ + n = ae_maxint(n, ae_round(1.8*x->cnt+1, _state), _state); + + /* + * Grow + */ + n2 = x->cnt; + ae_swap_vectors(x, &oldx); + ae_vector_set_length(x, n, _state); + for(i=0; i<=n-1; i++) + { + if( iptr.pp_int[i][j] = oldx.ptr.pp_int[i][j]; - } - else - { - x->ptr.pp_int[i][j] = 0; - } + x->ptr.p_bool[i] = oldx.ptr.p_bool[i]; + } + else + { + x->ptr.p_bool[i] = ae_false; } } ae_frame_leave(_state); @@ -960,1269 +1099,1373 @@ /************************************************************************* -This function checks that length(X) is at least N and first N values from -X[] are finite +Grows X, i.e. 
changes its size in such a way that: +a) contents is preserved +b) new size is at least N +c) new size can be larger than N, so subsequent grow() calls can return + without reallocation -- ALGLIB -- - Copyright 18.06.2010 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -ae_bool isfinitevector(/* Real */ ae_vector* x, +void ivectorgrowto(/* Integer */ ae_vector* x, ae_int_t n, ae_state *_state) { + ae_frame _frame_block; + ae_vector oldx; ae_int_t i; - ae_bool result; + ae_int_t n2; + ae_frame_make(_state, &_frame_block); + memset(&oldx, 0, sizeof(oldx)); + ae_vector_init(&oldx, 0, DT_INT, _state, ae_true); - ae_assert(n>=0, "APSERVIsFiniteVector: internal error (N<0)", _state); - if( n==0 ) - { - result = ae_true; - return result; - } - if( x->cntcnt>=n ) { - result = ae_false; - return result; + ae_frame_leave(_state); + return; } + + /* + * Choose new size + */ + n = ae_maxint(n, ae_round(1.8*x->cnt+1, _state), _state); + + /* + * Grow + */ + n2 = x->cnt; + ae_swap_vectors(x, &oldx); + ae_vector_set_length(x, n, _state); for(i=0; i<=n-1; i++) { - if( !ae_isfinite(x->ptr.p_double[i], _state) ) + if( iptr.p_int[i] = oldx.ptr.p_int[i]; + } + else + { + x->ptr.p_int[i] = 0; } } - result = ae_true; - return result; + ae_frame_leave(_state); } /************************************************************************* -This function checks that first N values from X[] are finite +Grows X, i.e. appends rows in such a way that: +a) contents is preserved +b) new row count is at least N +c) new row count can be larger than N, so subsequent grow() calls can return + without reallocation +d) new matrix has at least MinCols columns (if less than specified amount + of columns is present, new columns are added with undefined contents); + MinCols can be 0 or negative value = ignored -- ALGLIB -- - Copyright 18.06.2010 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -ae_bool isfinitecvector(/* Complex */ ae_vector* z, +void rmatrixgrowrowsto(/* Real */ ae_matrix* a, ae_int_t n, + ae_int_t mincols, ae_state *_state) { + ae_frame _frame_block; + ae_matrix olda; ae_int_t i; - ae_bool result; + ae_int_t j; + ae_int_t n2; + ae_int_t m; + ae_frame_make(_state, &_frame_block); + memset(&olda, 0, sizeof(olda)); + ae_matrix_init(&olda, 0, 0, DT_REAL, _state, ae_true); - ae_assert(n>=0, "APSERVIsFiniteCVector: internal error (N<0)", _state); - for(i=0; i<=n-1; i++) + + /* + * Enough place? + */ + if( a->rows>=n&&a->cols>=mincols ) { - if( !ae_isfinite(z->ptr.p_complex[i].x, _state)||!ae_isfinite(z->ptr.p_complex[i].y, _state) ) + ae_frame_leave(_state); + return; + } + + /* + * Sizes and metrics + */ + if( a->rowsrows+1, _state), _state); + } + n2 = ae_minint(a->rows, n, _state); + m = a->cols; + + /* + * Grow + */ + ae_swap_matrices(a, &olda); + ae_matrix_set_length(a, n, ae_maxint(m, mincols, _state), _state); + for(i=0; i<=n2-1; i++) + { + for(j=0; j<=m-1; j++) { - result = ae_false; - return result; + a->ptr.pp_double[i][j] = olda.ptr.pp_double[i][j]; } } - result = ae_true; - return result; + ae_frame_leave(_state); } /************************************************************************* -This function checks that size of X is at least MxN and values from -X[0..M-1,0..N-1] are finite. +Grows X, i.e. 
appends cols in such a way that: +a) contents is preserved +b) new col count is at least N +c) new col count can be larger than N, so subsequent grow() calls can return + without reallocation +d) new matrix has at least MinRows row (if less than specified amount + of rows is present, new rows are added with undefined contents); + MinRows can be 0 or negative value = ignored -- ALGLIB -- - Copyright 18.06.2010 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -ae_bool apservisfinitematrix(/* Real */ ae_matrix* x, - ae_int_t m, +void rmatrixgrowcolsto(/* Real */ ae_matrix* a, ae_int_t n, + ae_int_t minrows, ae_state *_state) { + ae_frame _frame_block; + ae_matrix olda; ae_int_t i; ae_int_t j; - ae_bool result; + ae_int_t n2; + ae_int_t m; + ae_frame_make(_state, &_frame_block); + memset(&olda, 0, sizeof(olda)); + ae_matrix_init(&olda, 0, 0, DT_REAL, _state, ae_true); - ae_assert(n>=0, "APSERVIsFiniteMatrix: internal error (N<0)", _state); - ae_assert(m>=0, "APSERVIsFiniteMatrix: internal error (M<0)", _state); - if( m==0||n==0 ) + + /* + * Enough place? + */ + if( a->cols>=n&&a->rows>=minrows ) { - result = ae_true; - return result; + ae_frame_leave(_state); + return; } - if( x->rowscolscolscols+1, _state), _state); } + n2 = ae_minint(a->cols, n, _state); + m = a->rows; + + /* + * Grow + */ + ae_swap_matrices(a, &olda); + ae_matrix_set_length(a, ae_maxint(m, minrows, _state), n, _state); for(i=0; i<=m-1; i++) { - for(j=0; j<=n-1; j++) + for(j=0; j<=n2-1; j++) { - if( !ae_isfinite(x->ptr.pp_double[i][j], _state) ) - { - result = ae_false; - return result; - } + a->ptr.pp_double[i][j] = olda.ptr.pp_double[i][j]; } } - result = ae_true; - return result; + ae_frame_leave(_state); } /************************************************************************* -This function checks that all values from X[0..M-1,0..N-1] are finite +Grows X, i.e. 
changes its size in such a way that: +a) contents is preserved +b) new size is at least N +c) new size can be larger than N, so subsequent grow() calls can return + without reallocation -- ALGLIB -- - Copyright 18.06.2010 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -ae_bool apservisfinitecmatrix(/* Complex */ ae_matrix* x, - ae_int_t m, +void rvectorgrowto(/* Real */ ae_vector* x, ae_int_t n, ae_state *_state) { + ae_frame _frame_block; + ae_vector oldx; ae_int_t i; - ae_int_t j; - ae_bool result; + ae_int_t n2; + ae_frame_make(_state, &_frame_block); + memset(&oldx, 0, sizeof(oldx)); + ae_vector_init(&oldx, 0, DT_REAL, _state, ae_true); - ae_assert(n>=0, "APSERVIsFiniteCMatrix: internal error (N<0)", _state); - ae_assert(m>=0, "APSERVIsFiniteCMatrix: internal error (M<0)", _state); - for(i=0; i<=m-1; i++) + + /* + * Enough place + */ + if( x->cnt>=n ) { - for(j=0; j<=n-1; j++) + ae_frame_leave(_state); + return; + } + + /* + * Choose new size + */ + n = ae_maxint(n, ae_round(1.8*x->cnt+1, _state), _state); + + /* + * Grow + */ + n2 = x->cnt; + ae_swap_vectors(x, &oldx); + ae_vector_set_length(x, n, _state); + for(i=0; i<=n-1; i++) + { + if( iptr.pp_complex[i][j].x, _state)||!ae_isfinite(x->ptr.pp_complex[i][j].y, _state) ) - { - result = ae_false; - return result; - } + x->ptr.p_double[i] = oldx.ptr.p_double[i]; + } + else + { + x->ptr.p_double[i] = (double)(0); } } - result = ae_true; - return result; + ae_frame_leave(_state); } /************************************************************************* -This function checks that size of X is at least NxN and all values from -upper/lower triangle of X[0..N-1,0..N-1] are finite +Resizes X and: +* preserves old contents of X +* fills new elements by zeros -- ALGLIB -- - Copyright 18.06.2010 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -ae_bool isfinitertrmatrix(/* Real */ ae_matrix* x, +void ivectorresize(/* Integer */ ae_vector* x, ae_int_t n, - ae_bool isupper, ae_state *_state) { + ae_frame _frame_block; + ae_vector oldx; ae_int_t i; - ae_int_t j1; - ae_int_t j2; - ae_int_t j; - ae_bool result; + ae_int_t n2; + ae_frame_make(_state, &_frame_block); + memset(&oldx, 0, sizeof(oldx)); + ae_vector_init(&oldx, 0, DT_INT, _state, ae_true); - ae_assert(n>=0, "APSERVIsFiniteRTRMatrix: internal error (N<0)", _state); - if( n==0 ) - { - result = ae_true; - return result; - } - if( x->rowscolscnt; + ae_swap_vectors(x, &oldx); + ae_vector_set_length(x, n, _state); for(i=0; i<=n-1; i++) { - if( isupper ) + if( iptr.p_int[i] = oldx.ptr.p_int[i]; } else { - j1 = 0; - j2 = i; - } - for(j=j1; j<=j2; j++) - { - if( !ae_isfinite(x->ptr.pp_double[i][j], _state) ) - { - result = ae_false; - return result; - } + x->ptr.p_int[i] = 0; } } - result = ae_true; - return result; + ae_frame_leave(_state); } /************************************************************************* -This function checks that all values from upper/lower triangle of -X[0..N-1,0..N-1] are finite +Resizes X and: +* preserves old contents of X +* fills new elements by zeros -- ALGLIB -- - Copyright 18.06.2010 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -ae_bool apservisfinitectrmatrix(/* Complex */ ae_matrix* x, +void rvectorresize(/* Real */ ae_vector* x, ae_int_t n, - ae_bool isupper, ae_state *_state) { + 
ae_frame _frame_block; + ae_vector oldx; ae_int_t i; - ae_int_t j1; - ae_int_t j2; - ae_int_t j; - ae_bool result; + ae_int_t n2; + ae_frame_make(_state, &_frame_block); + memset(&oldx, 0, sizeof(oldx)); + ae_vector_init(&oldx, 0, DT_REAL, _state, ae_true); - ae_assert(n>=0, "APSERVIsFiniteCTRMatrix: internal error (N<0)", _state); + n2 = x->cnt; + ae_swap_vectors(x, &oldx); + ae_vector_set_length(x, n, _state); for(i=0; i<=n-1; i++) { - if( isupper ) + if( iptr.p_double[i] = oldx.ptr.p_double[i]; } else { - j1 = 0; - j2 = i; - } - for(j=j1; j<=j2; j++) - { - if( !ae_isfinite(x->ptr.pp_complex[i][j].x, _state)||!ae_isfinite(x->ptr.pp_complex[i][j].y, _state) ) - { - result = ae_false; - return result; - } + x->ptr.p_double[i] = (double)(0); } } - result = ae_true; - return result; + ae_frame_leave(_state); } /************************************************************************* -This function checks that all values from X[0..M-1,0..N-1] are finite or -NaN's. +Resizes X and: +* preserves old contents of X +* fills new elements by zeros -- ALGLIB -- - Copyright 18.06.2010 by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -ae_bool apservisfiniteornanmatrix(/* Real */ ae_matrix* x, +void rmatrixresize(/* Real */ ae_matrix* x, ae_int_t m, ae_int_t n, ae_state *_state) { + ae_frame _frame_block; + ae_matrix oldx; ae_int_t i; ae_int_t j; - ae_bool result; + ae_int_t m2; + ae_int_t n2; + ae_frame_make(_state, &_frame_block); + memset(&oldx, 0, sizeof(oldx)); + ae_matrix_init(&oldx, 0, 0, DT_REAL, _state, ae_true); - ae_assert(n>=0, "APSERVIsFiniteOrNaNMatrix: internal error (N<0)", _state); - ae_assert(m>=0, "APSERVIsFiniteOrNaNMatrix: internal error (M<0)", _state); + m2 = x->rows; + n2 = x->cols; + ae_swap_matrices(x, &oldx); + ae_matrix_set_length(x, m, n, _state); for(i=0; i<=m-1; i++) { for(j=0; j<=n-1; j++) { - if( !(ae_isfinite(x->ptr.pp_double[i][j], _state)||ae_isnan(x->ptr.pp_double[i][j], _state)) ) + if( iptr.pp_double[i][j] = oldx.ptr.pp_double[i][j]; + } + else + { + x->ptr.pp_double[i][j] = 0.0; } } } - result = ae_true; - return result; + ae_frame_leave(_state); } /************************************************************************* -Safe sqrt(x^2+y^2) +Resizes X and: +* preserves old contents of X +* fills new elements by zeros -- ALGLIB -- - Copyright by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -double safepythag2(double x, double y, ae_state *_state) +void imatrixresize(/* Integer */ ae_matrix* x, + ae_int_t m, + ae_int_t n, + ae_state *_state) { - double w; - double xabs; - double yabs; - double z; - double result; + ae_frame _frame_block; + ae_matrix oldx; + ae_int_t i; + ae_int_t j; + ae_int_t m2; + ae_int_t n2; + ae_frame_make(_state, &_frame_block); + memset(&oldx, 0, sizeof(oldx)); + ae_matrix_init(&oldx, 0, 0, DT_INT, _state, ae_true); - xabs = ae_fabs(x, _state); - yabs = ae_fabs(y, _state); - w = ae_maxreal(xabs, yabs, _state); - z = ae_minreal(xabs, yabs, _state); - if( ae_fp_eq(z,(double)(0)) ) - { - result = w; - } - else + m2 = x->rows; + n2 = x->cols; + ae_swap_matrices(x, &oldx); + ae_matrix_set_length(x, m, n, _state); + for(i=0; i<=m-1; i++) { - result = w*ae_sqrt(1+ae_sqr(z/w, _state), _state); + for(j=0; j<=n-1; j++) + { + if( iptr.pp_int[i][j] = oldx.ptr.pp_int[i][j]; + } + else + { + x->ptr.pp_int[i][j] = 0; + } + } } - return result; + ae_frame_leave(_state); } 
/************************************************************************* -Safe sqrt(x^2+y^2) +appends element to X -- ALGLIB -- - Copyright by Bochkanov Sergey + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -double safepythag3(double x, double y, double z, ae_state *_state) +void ivectorappend(/* Integer */ ae_vector* x, + ae_int_t v, + ae_state *_state) { - double w; - double result; + ae_frame _frame_block; + ae_vector oldx; + ae_int_t i; + ae_int_t n; + ae_frame_make(_state, &_frame_block); + memset(&oldx, 0, sizeof(oldx)); + ae_vector_init(&oldx, 0, DT_INT, _state, ae_true); - w = ae_maxreal(ae_fabs(x, _state), ae_maxreal(ae_fabs(y, _state), ae_fabs(z, _state), _state), _state); - if( ae_fp_eq(w,(double)(0)) ) + n = x->cnt; + ae_swap_vectors(x, &oldx); + ae_vector_set_length(x, n+1, _state); + for(i=0; i<=n-1; i++) { - result = (double)(0); - return result; + x->ptr.p_int[i] = oldx.ptr.p_int[i]; } - x = x/w; - y = y/w; - z = z/w; - result = w*ae_sqrt(ae_sqr(x, _state)+ae_sqr(y, _state)+ae_sqr(z, _state), _state); - return result; + x->ptr.p_int[n] = v; + ae_frame_leave(_state); } /************************************************************************* -Safe division. - -This function attempts to calculate R=X/Y without overflow. - -It returns: -* +1, if abs(X/Y)>=MaxRealNumber or undefined - overflow-like situation - (no overlfow is generated, R is either NAN, PosINF, NegINF) -* 0, if MinRealNumber0 - (R contains result, may be zero) -* -1, if 00 - */ - if( ae_fp_eq(y,(double)(0)) ) + ae_assert(n>=0, "APSERVIsFiniteVector: internal error (N<0)", _state); + if( n==0 ) { - result = 1; - if( ae_fp_eq(x,(double)(0)) ) - { - *r = _state->v_nan; - } - if( ae_fp_greater(x,(double)(0)) ) - { - *r = _state->v_posinf; - } - if( ae_fp_less(x,(double)(0)) ) - { - *r = _state->v_neginf; - } + result = ae_true; return result; } - if( ae_fp_eq(x,(double)(0)) ) + if( x->cnt0 - */ - if( ae_fp_less(y,(double)(0)) ) - { - x = -x; - y = -y; - } - - /* - * - */ - if( ae_fp_greater_eq(y,(double)(1)) ) + v = (double)(0); + for(i=0; i<=n-1; i++) { - *r = x/y; - if( ae_fp_less_eq(ae_fabs(*r, _state),ae_minrealnumber) ) - { - result = -1; - *r = (double)(0); - } - else - { - result = 0; - } + v = 0.01*v+x->ptr.p_double[i]; } - else + result = ae_isfinite(v, _state); + return result; +} + + +/************************************************************************* +This function checks that first N values from X[] are finite + + -- ALGLIB -- + Copyright 18.06.2010 by Bochkanov Sergey +*************************************************************************/ +ae_bool isfinitecvector(/* Complex */ ae_vector* z, + ae_int_t n, + ae_state *_state) +{ + ae_int_t i; + ae_bool result; + + + ae_assert(n>=0, "APSERVIsFiniteCVector: internal error (N<0)", _state); + for(i=0; i<=n-1; i++) { - if( ae_fp_greater_eq(ae_fabs(x, _state),ae_maxrealnumber*y) ) - { - if( ae_fp_greater(x,(double)(0)) ) - { - *r = _state->v_posinf; - } - else - { - *r = _state->v_neginf; - } - result = 1; - } - else + if( !ae_isfinite(z->ptr.p_complex[i].x, _state)||!ae_isfinite(z->ptr.p_complex[i].y, _state) ) { - *r = x/y; - result = 0; + result = ae_false; + return result; } } + result = ae_true; return result; } /************************************************************************* -This function calculates "safe" min(X/Y,V) for positive finite X, Y, V. -No overflow is generated in any case. 
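The *growto() helpers introduced above all follow the same amortized policy: if the container already holds at least N elements nothing happens, otherwise it is reallocated to max(N, round(1.8*oldsize+1)) with the old contents preserved; the vector variants zero-fill (or false-fill) the new tail, while the matrix variants leave the added rows or columns undefined. A compact sketch of that policy on top of std::vector, assuming only the semantics described in the comments above:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Sketch only: mirrors rvectorgrowto()'s growth policy, not its ae_vector plumbing.
    void rvectorgrowto(std::vector<double>& x, std::size_t n)
    {
        if (x.size() >= n)
            return;                                        // enough room already
        std::size_t target = std::max<std::size_t>(
            n, static_cast<std::size_t>(std::lround(1.8 * x.size() + 1)));
        x.resize(target, 0.0);                             // keep old data, zero the new tail
    }

With geometric growth like this, repeated grow calls with slowly increasing N reallocate only O(log N) times rather than once per call.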
+This function checks that size of X is at least MxN and values from +X[0..M-1,0..N-1] are finite. -- ALGLIB -- - Copyright by Bochkanov Sergey + Copyright 18.06.2010 by Bochkanov Sergey *************************************************************************/ -double safeminposrv(double x, double y, double v, ae_state *_state) +ae_bool apservisfinitematrix(/* Real */ ae_matrix* x, + ae_int_t m, + ae_int_t n, + ae_state *_state) { - double r; - double result; + ae_int_t i; + ae_int_t j; + ae_bool result; - if( ae_fp_greater_eq(y,(double)(1)) ) + ae_assert(n>=0, "APSERVIsFiniteMatrix: internal error (N<0)", _state); + ae_assert(m>=0, "APSERVIsFiniteMatrix: internal error (M<0)", _state); + if( m==0||n==0 ) { - - /* - * Y>=1, we can safely divide by Y - */ - r = x/y; - result = v; - if( ae_fp_greater(v,r) ) - { - result = r; - } - else - { - result = v; - } + result = ae_true; + return result; } - else + if( x->rowscolsptr.pp_double[i][j], _state) ) + { + result = ae_false; + return result; + } } } + result = ae_true; return result; } /************************************************************************* -This function makes periodic mapping of X to [A,B]. - -It accepts X, A, B (A>B). It returns T which lies in [A,B] and integer K, -such that X = T + K*(B-A). - -NOTES: -* K is represented as real value, although actually it is integer -* T is guaranteed to be in [A,B] -* T replaces X +This function checks that all values from X[0..M-1,0..N-1] are finite -- ALGLIB -- - Copyright by Bochkanov Sergey + Copyright 18.06.2010 by Bochkanov Sergey *************************************************************************/ -void apperiodicmap(double* x, - double a, - double b, - double* k, +ae_bool apservisfinitecmatrix(/* Complex */ ae_matrix* x, + ae_int_t m, + ae_int_t n, ae_state *_state) { + ae_int_t i; + ae_int_t j; + ae_bool result; - *k = 0; - ae_assert(ae_fp_less(a,b), "APPeriodicMap: internal error!", _state); - *k = (double)(ae_ifloor((*x-a)/(b-a), _state)); - *x = *x-*k*(b-a); - while(ae_fp_less(*x,a)) - { - *x = *x+(b-a); - *k = *k-1; - } - while(ae_fp_greater(*x,b)) + ae_assert(n>=0, "APSERVIsFiniteCMatrix: internal error (N<0)", _state); + ae_assert(m>=0, "APSERVIsFiniteCMatrix: internal error (M<0)", _state); + for(i=0; i<=m-1; i++) { - *x = *x-(b-a); - *k = *k+1; + for(j=0; j<=n-1; j++) + { + if( !ae_isfinite(x->ptr.pp_complex[i][j].x, _state)||!ae_isfinite(x->ptr.pp_complex[i][j].y, _state) ) + { + result = ae_false; + return result; + } + } } - *x = ae_maxreal(*x, a, _state); - *x = ae_minreal(*x, b, _state); + result = ae_true; + return result; } /************************************************************************* -Returns random normal number using low-quality system-provided generator +This function checks that size of X is at least NxN and all values from +upper/lower triangle of X[0..N-1,0..N-1] are finite -- ALGLIB -- - Copyright 20.03.2009 by Bochkanov Sergey + Copyright 18.06.2010 by Bochkanov Sergey *************************************************************************/ -double randomnormal(ae_state *_state) +ae_bool isfinitertrmatrix(/* Real */ ae_matrix* x, + ae_int_t n, + ae_bool isupper, + ae_state *_state) { - double u; - double v; - double s; - double result; + ae_int_t i; + ae_int_t j1; + ae_int_t j2; + ae_int_t j; + ae_bool result; - for(;;) + ae_assert(n>=0, "APSERVIsFiniteRTRMatrix: internal error (N<0)", _state); + if( n==0 ) { - u = 2*ae_randomreal(_state)-1; - v = 2*ae_randomreal(_state)-1; - s = ae_sqr(u, _state)+ae_sqr(v, _state); - if( 
ae_fp_greater(s,(double)(0))&&ae_fp_less(s,(double)(1)) ) + result = ae_true; + return result; + } + if( x->rowscolsptr.pp_double[i][j], _state) ) + { + result = ae_false; + return result; + } } } + result = ae_true; return result; } /************************************************************************* -Generates random unit vector using low-quality system-provided generator. -Reallocates array if its size is too short. +This function checks that all values from upper/lower triangle of +X[0..N-1,0..N-1] are finite -- ALGLIB -- - Copyright 20.03.2009 by Bochkanov Sergey + Copyright 18.06.2010 by Bochkanov Sergey *************************************************************************/ -void randomunit(ae_int_t n, /* Real */ ae_vector* x, ae_state *_state) +ae_bool apservisfinitectrmatrix(/* Complex */ ae_matrix* x, + ae_int_t n, + ae_bool isupper, + ae_state *_state) { ae_int_t i; - double v; - double vv; + ae_int_t j1; + ae_int_t j2; + ae_int_t j; + ae_bool result; - ae_assert(n>0, "RandomUnit: N<=0", _state); - if( x->cnt=0, "APSERVIsFiniteCTRMatrix: internal error (N<0)", _state); + for(i=0; i<=n-1; i++) { - v = 0.0; - for(i=0; i<=n-1; i++) + if( isupper ) { - vv = randomnormal(_state); - x->ptr.p_double[i] = vv; - v = v+vv*vv; + j1 = i; + j2 = n-1; + } + else + { + j1 = 0; + j2 = i; + } + for(j=j1; j<=j2; j++) + { + if( !ae_isfinite(x->ptr.pp_complex[i][j].x, _state)||!ae_isfinite(x->ptr.pp_complex[i][j].y, _state) ) + { + result = ae_false; + return result; + } } } - while(ae_fp_less_eq(v,(double)(0))); - v = 1/ae_sqrt(v, _state); - for(i=0; i<=n-1; i++) - { - x->ptr.p_double[i] = x->ptr.p_double[i]*v; - } + result = ae_true; + return result; } /************************************************************************* -This function is used to swap two integer values -*************************************************************************/ -void swapi(ae_int_t* v0, ae_int_t* v1, ae_state *_state) -{ - ae_int_t v; - - - v = *v0; - *v0 = *v1; - *v1 = v; -} - +This function checks that all values from X[0..M-1,0..N-1] are finite or +NaN's. 
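Note how the rewritten isfinitevector() above no longer tests each element with ae_isfinite(): it folds the data into a single accumulator via v = 0.01*v + x[i] and checks only the final value, since a NaN or infinity anywhere in the input leaves the accumulator non-finite. This trades one finiteness test for N, at the cost of possibly flagging vectors whose running sum itself overflows. A standalone illustration of the same trick:

    #include <cassert>
    #include <cmath>
    #include <limits>
    #include <vector>

    // Same idea as the patched isfinitevector(): one finiteness test instead of N.
    bool isfinitevector(const std::vector<double>& x)
    {
        double v = 0.0;
        for (double xi : x)
            v = 0.01 * v + xi;        // NaN/Inf anywhere makes v non-finite
        return std::isfinite(v);
    }

    int main()
    {
        assert( isfinitevector({1.0, 2.0, 3.0}));
        assert(!isfinitevector({1.0, std::numeric_limits<double>::infinity()}));
        assert(!isfinitevector({std::nan(""), 2.0}));
        return 0;
    }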
-/************************************************************************* -This function is used to swap two real values + -- ALGLIB -- + Copyright 18.06.2010 by Bochkanov Sergey *************************************************************************/ -void swapr(double* v0, double* v1, ae_state *_state) +ae_bool apservisfiniteornanmatrix(/* Real */ ae_matrix* x, + ae_int_t m, + ae_int_t n, + ae_state *_state) { - double v; + ae_int_t i; + ae_int_t j; + ae_bool result; - v = *v0; - *v0 = *v1; - *v1 = v; + ae_assert(n>=0, "APSERVIsFiniteOrNaNMatrix: internal error (N<0)", _state); + ae_assert(m>=0, "APSERVIsFiniteOrNaNMatrix: internal error (M<0)", _state); + for(i=0; i<=m-1; i++) + { + for(j=0; j<=n-1; j++) + { + if( !(ae_isfinite(x->ptr.pp_double[i][j], _state)||ae_isnan(x->ptr.pp_double[i][j], _state)) ) + { + result = ae_false; + return result; + } + } + } + result = ae_true; + return result; } /************************************************************************* -This function is used to return maximum of three real values +Safe sqrt(x^2+y^2) + + -- ALGLIB -- + Copyright by Bochkanov Sergey *************************************************************************/ -double maxreal3(double v0, double v1, double v2, ae_state *_state) +double safepythag2(double x, double y, ae_state *_state) { + double w; + double xabs; + double yabs; + double z; double result; - result = v0; - if( ae_fp_less(result,v1) ) + xabs = ae_fabs(x, _state); + yabs = ae_fabs(y, _state); + w = ae_maxreal(xabs, yabs, _state); + z = ae_minreal(xabs, yabs, _state); + if( ae_fp_eq(z,(double)(0)) ) { - result = v1; + result = w; } - if( ae_fp_less(result,v2) ) + else { - result = v2; + result = w*ae_sqrt(1+ae_sqr(z/w, _state), _state); } return result; } /************************************************************************* -This function is used to increment value of integer variable +Safe sqrt(x^2+y^2) + + -- ALGLIB -- + Copyright by Bochkanov Sergey *************************************************************************/ -void inc(ae_int_t* v, ae_state *_state) +double safepythag3(double x, double y, double z, ae_state *_state) { + double w; + double result; - *v = *v+1; + w = ae_maxreal(ae_fabs(x, _state), ae_maxreal(ae_fabs(y, _state), ae_fabs(z, _state), _state), _state); + if( ae_fp_eq(w,(double)(0)) ) + { + result = (double)(0); + return result; + } + x = x/w; + y = y/w; + z = z/w; + result = w*ae_sqrt(ae_sqr(x, _state)+ae_sqr(y, _state)+ae_sqr(z, _state), _state); + return result; } /************************************************************************* -This function is used to decrement value of integer variable -*************************************************************************/ -void dec(ae_int_t* v, ae_state *_state) -{ +Safe division. +This function attempts to calculate R=X/Y without overflow. 
- *v = *v-1; -} +It returns: +* +1, if abs(X/Y)>=MaxRealNumber or undefined - overflow-like situation + (no overlfow is generated, R is either NAN, PosINF, NegINF) +* 0, if MinRealNumber0 + (R contains result, may be zero) +* -1, if 00 ) + + /* + * Two special cases: + * * Y=0 + * * X=0 and Y<>0 + */ + if( ae_fp_eq(y,(double)(0)) ) { - *v = *v-1; + result = 1; + if( ae_fp_eq(x,(double)(0)) ) + { + *r = _state->v_nan; + } + if( ae_fp_greater(x,(double)(0)) ) + { + *r = _state->v_posinf; + } + if( ae_fp_less(x,(double)(0)) ) + { + *r = _state->v_neginf; + } + return result; + } + if( ae_fp_eq(x,(double)(0)) ) + { + *r = (double)(0); + result = 0; + return result; + } + + /* + * make Y>0 + */ + if( ae_fp_less(y,(double)(0)) ) + { + x = -x; + y = -y; + } + + /* + * + */ + if( ae_fp_greater_eq(y,(double)(1)) ) + { + *r = x/y; + if( ae_fp_less_eq(ae_fabs(*r, _state),ae_minrealnumber) ) + { + result = -1; + *r = (double)(0); + } + else + { + result = 0; + } } else { - *v = 0; + if( ae_fp_greater_eq(ae_fabs(x, _state),ae_maxrealnumber*y) ) + { + if( ae_fp_greater(x,(double)(0)) ) + { + *r = _state->v_posinf; + } + else + { + *r = _state->v_neginf; + } + result = 1; + } + else + { + *r = x/y; + result = 0; + } } + return result; } /************************************************************************* -'bounds' value: maps X to [B1,B2] +This function calculates "safe" min(X/Y,V) for positive finite X, Y, V. +No overflow is generated in any case. -- ALGLIB -- - Copyright 20.03.2009 by Bochkanov Sergey + Copyright by Bochkanov Sergey *************************************************************************/ -double boundval(double x, double b1, double b2, ae_state *_state) +double safeminposrv(double x, double y, double v, ae_state *_state) { + double r; double result; - if( ae_fp_less_eq(x,b1) ) + if( ae_fp_greater_eq(y,(double)(1)) ) { - result = b1; - return result; + + /* + * Y>=1, we can safely divide by Y + */ + r = x/y; + result = v; + if( ae_fp_greater(v,r) ) + { + result = r; + } + else + { + result = v; + } } - if( ae_fp_greater_eq(x,b2) ) + else { - result = b2; - return result; + + /* + * Y<1, we can safely multiply by Y + */ + if( ae_fp_less(x,v*y) ) + { + result = x/y; + } + else + { + result = v; + } } - result = x; return result; } /************************************************************************* -Allocation of serializer: complex value -*************************************************************************/ -void alloccomplex(ae_serializer* s, ae_complex v, ae_state *_state) -{ - +This function makes periodic mapping of X to [A,B]. - ae_serializer_alloc_entry(s); - ae_serializer_alloc_entry(s); -} +It accepts X, A, B (A>B). It returns T which lies in [A,B] and integer K, +such that X = T + K*(B-A). 
+NOTES: +* K is represented as real value, although actually it is integer +* T is guaranteed to be in [A,B] +* T replaces X -/************************************************************************* -Serialization: complex value + -- ALGLIB -- + Copyright by Bochkanov Sergey *************************************************************************/ -void serializecomplex(ae_serializer* s, ae_complex v, ae_state *_state) +void apperiodicmap(double* x, + double a, + double b, + double* k, + ae_state *_state) { + *k = 0; - ae_serializer_serialize_double(s, v.x, _state); - ae_serializer_serialize_double(s, v.y, _state); + ae_assert(ae_fp_less(a,b), "APPeriodicMap: internal error!", _state); + *k = (double)(ae_ifloor((*x-a)/(b-a), _state)); + *x = *x-*k*(b-a); + while(ae_fp_less(*x,a)) + { + *x = *x+(b-a); + *k = *k-1; + } + while(ae_fp_greater(*x,b)) + { + *x = *x-(b-a); + *k = *k+1; + } + *x = ae_maxreal(*x, a, _state); + *x = ae_minreal(*x, b, _state); } /************************************************************************* -Unserialization: complex value +Returns random normal number using low-quality system-provided generator + + -- ALGLIB -- + Copyright 20.03.2009 by Bochkanov Sergey *************************************************************************/ -ae_complex unserializecomplex(ae_serializer* s, ae_state *_state) +double randomnormal(ae_state *_state) { - ae_complex result; + double u; + double v; + double s; + double result; - ae_serializer_unserialize_double(s, &result.x, _state); - ae_serializer_unserialize_double(s, &result.y, _state); + for(;;) + { + u = 2*ae_randomreal(_state)-1; + v = 2*ae_randomreal(_state)-1; + s = ae_sqr(u, _state)+ae_sqr(v, _state); + if( ae_fp_greater(s,(double)(0))&&ae_fp_less(s,(double)(1)) ) + { + + /* + * two Sqrt's instead of one to + * avoid overflow when S is too small + */ + s = ae_sqrt(-2*ae_log(s, _state), _state)/ae_sqrt(s, _state); + result = u*s; + break; + } + } return result; } /************************************************************************* -Allocation of serializer: real array -*************************************************************************/ -void allocrealarray(ae_serializer* s, - /* Real */ ae_vector* v, - ae_int_t n, - ae_state *_state) +Generates random unit vector using low-quality system-provided generator. +Reallocates array if its size is too short. 
+ + -- ALGLIB -- + Copyright 20.03.2009 by Bochkanov Sergey +*************************************************************************/ +void randomunit(ae_int_t n, /* Real */ ae_vector* x, ae_state *_state) { ae_int_t i; + double v; + double vv; - if( n<0 ) + ae_assert(n>0, "RandomUnit: N<=0", _state); + if( x->cntcnt; + ae_vector_set_length(x, n, _state); } - ae_serializer_alloc_entry(s); + do + { + v = 0.0; + for(i=0; i<=n-1; i++) + { + vv = randomnormal(_state); + x->ptr.p_double[i] = vv; + v = v+vv*vv; + } + } + while(ae_fp_less_eq(v,(double)(0))); + v = 1/ae_sqrt(v, _state); for(i=0; i<=n-1; i++) { - ae_serializer_alloc_entry(s); + x->ptr.p_double[i] = x->ptr.p_double[i]*v; } } /************************************************************************* -Serialization: complex value +This function is used to swap two integer values *************************************************************************/ -void serializerealarray(ae_serializer* s, - /* Real */ ae_vector* v, - ae_int_t n, - ae_state *_state) +void swapi(ae_int_t* v0, ae_int_t* v1, ae_state *_state) { - ae_int_t i; + ae_int_t v; - if( n<0 ) - { - n = v->cnt; - } - ae_serializer_serialize_int(s, n, _state); - for(i=0; i<=n-1; i++) - { - ae_serializer_serialize_double(s, v->ptr.p_double[i], _state); - } + v = *v0; + *v0 = *v1; + *v1 = v; } /************************************************************************* -Unserialization: complex value +This function is used to swap two real values *************************************************************************/ -void unserializerealarray(ae_serializer* s, - /* Real */ ae_vector* v, +void swapr(double* v0, double* v1, ae_state *_state) +{ + double v; + + + v = *v0; + *v0 = *v1; + *v1 = v; +} + + +/************************************************************************* +This function is used to swap two rows of the matrix; if NCols<0, automatically +determined from the matrix size. +*************************************************************************/ +void swaprows(/* Real */ ae_matrix* a, + ae_int_t i0, + ae_int_t i1, + ae_int_t ncols, ae_state *_state) { - ae_int_t n; - ae_int_t i; - double t; + ae_int_t j; + double v; - ae_vector_clear(v); - ae_serializer_unserialize_int(s, &n, _state); - if( n==0 ) + if( i0==i1 ) { return; } - ae_vector_set_length(v, n, _state); - for(i=0; i<=n-1; i++) + if( ncols<0 ) { - ae_serializer_unserialize_double(s, &t, _state); - v->ptr.p_double[i] = t; + ncols = a->cols; + } + for(j=0; j<=ncols-1; j++) + { + v = a->ptr.pp_double[i0][j]; + a->ptr.pp_double[i0][j] = a->ptr.pp_double[i1][j]; + a->ptr.pp_double[i1][j] = v; } } /************************************************************************* -Allocation of serializer: Integer array +This function is used to swap two cols of the matrix; if NRows<0, automatically +determined from the matrix size. 
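The new swaprows()/swapcols()/swapentries() helpers above are all the same in-place exchange; swapentries() in particular treats a flat real array as a sequence of fixed-width records and swaps two records by offset. A sketch of that record swap using the standard library, assuming non-overlapping entries (which i0 != i1 guarantees for a fixed width):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Mirror of swapentries(): entry i occupies [i*entrywidth, (i+1)*entrywidth).
    void swapentries(std::vector<double>& a, std::size_t i0, std::size_t i1,
                     std::size_t entrywidth)
    {
        if (i0 == i1)
            return;                                // nothing to do, matches the patch
        auto off = [&](std::size_t i) {
            return a.begin() + static_cast<std::ptrdiff_t>(i * entrywidth);
        };
        std::swap_ranges(off(i0), off(i0 + 1), off(i1));
    }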
*************************************************************************/ -void allocintegerarray(ae_serializer* s, - /* Integer */ ae_vector* v, - ae_int_t n, +void swapcols(/* Real */ ae_matrix* a, + ae_int_t j0, + ae_int_t j1, + ae_int_t nrows, ae_state *_state) { ae_int_t i; + double v; - if( n<0 ) + if( j0==j1 ) { - n = v->cnt; + return; } - ae_serializer_alloc_entry(s); - for(i=0; i<=n-1; i++) + if( nrows<0 ) { - ae_serializer_alloc_entry(s); + nrows = a->rows; + } + for(i=0; i<=nrows-1; i++) + { + v = a->ptr.pp_double[i][j0]; + a->ptr.pp_double[i][j0] = a->ptr.pp_double[i][j1]; + a->ptr.pp_double[i][j1] = v; } } /************************************************************************* -Serialization: Integer array +This function is used to swap two "entries" in 1-dimensional array composed +from D-element entries *************************************************************************/ -void serializeintegerarray(ae_serializer* s, - /* Integer */ ae_vector* v, - ae_int_t n, +void swapentries(/* Real */ ae_vector* a, + ae_int_t i0, + ae_int_t i1, + ae_int_t entrywidth, ae_state *_state) { - ae_int_t i; + ae_int_t offs0; + ae_int_t offs1; + ae_int_t j; + double v; - if( n<0 ) + if( i0==i1 ) { - n = v->cnt; + return; } - ae_serializer_serialize_int(s, n, _state); - for(i=0; i<=n-1; i++) + offs0 = i0*entrywidth; + offs1 = i1*entrywidth; + for(j=0; j<=entrywidth-1; j++) { - ae_serializer_serialize_int(s, v->ptr.p_int[i], _state); + v = a->ptr.p_double[offs0+j]; + a->ptr.p_double[offs0+j] = a->ptr.p_double[offs1+j]; + a->ptr.p_double[offs1+j] = v; } } /************************************************************************* -Unserialization: complex value +This function is used to swap two elements of the vector *************************************************************************/ -void unserializeintegerarray(ae_serializer* s, - /* Integer */ ae_vector* v, +void swapelements(/* Real */ ae_vector* a, + ae_int_t i0, + ae_int_t i1, ae_state *_state) { - ae_int_t n; - ae_int_t i; - ae_int_t t; + double v; - ae_vector_clear(v); - ae_serializer_unserialize_int(s, &n, _state); - if( n==0 ) + if( i0==i1 ) { return; } - ae_vector_set_length(v, n, _state); - for(i=0; i<=n-1; i++) - { - ae_serializer_unserialize_int(s, &t, _state); - v->ptr.p_int[i] = t; - } + v = a->ptr.p_double[i0]; + a->ptr.p_double[i0] = a->ptr.p_double[i1]; + a->ptr.p_double[i1] = v; } /************************************************************************* -Allocation of serializer: real matrix +This function is used to swap two elements of the vector *************************************************************************/ -void allocrealmatrix(ae_serializer* s, - /* Real */ ae_matrix* v, - ae_int_t n0, - ae_int_t n1, +void swapelementsi(/* Integer */ ae_vector* a, + ae_int_t i0, + ae_int_t i1, ae_state *_state) { - ae_int_t i; - ae_int_t j; + ae_int_t v; - if( n0<0 ) - { - n0 = v->rows; - } - if( n1<0 ) - { - n1 = v->cols; - } - ae_serializer_alloc_entry(s); - ae_serializer_alloc_entry(s); - for(i=0; i<=n0-1; i++) + if( i0==i1 ) { - for(j=0; j<=n1-1; j++) - { - ae_serializer_alloc_entry(s); - } + return; } + v = a->ptr.p_int[i0]; + a->ptr.p_int[i0] = a->ptr.p_int[i1]; + a->ptr.p_int[i1] = v; } /************************************************************************* -Serialization: complex value +This function is used to return maximum of three real values *************************************************************************/ -void serializerealmatrix(ae_serializer* s, - /* Real */ ae_matrix* v, - ae_int_t 
n0, - ae_int_t n1, - ae_state *_state) +double maxreal3(double v0, double v1, double v2, ae_state *_state) { - ae_int_t i; - ae_int_t j; + double result; - if( n0<0 ) - { - n0 = v->rows; - } - if( n1<0 ) + result = v0; + if( ae_fp_less(result,v1) ) { - n1 = v->cols; + result = v1; } - ae_serializer_serialize_int(s, n0, _state); - ae_serializer_serialize_int(s, n1, _state); - for(i=0; i<=n0-1; i++) + if( ae_fp_less(result,v2) ) { - for(j=0; j<=n1-1; j++) - { - ae_serializer_serialize_double(s, v->ptr.pp_double[i][j], _state); - } + result = v2; } + return result; } /************************************************************************* -Unserialization: complex value +This function is used to increment value of integer variable *************************************************************************/ -void unserializerealmatrix(ae_serializer* s, - /* Real */ ae_matrix* v, - ae_state *_state) +void inc(ae_int_t* v, ae_state *_state) { - ae_int_t i; - ae_int_t j; - ae_int_t n0; - ae_int_t n1; - double t; - ae_matrix_clear(v); - ae_serializer_unserialize_int(s, &n0, _state); - ae_serializer_unserialize_int(s, &n1, _state); - if( n0==0||n1==0 ) - { - return; - } - ae_matrix_set_length(v, n0, n1, _state); - for(i=0; i<=n0-1; i++) - { - for(j=0; j<=n1-1; j++) - { - ae_serializer_unserialize_double(s, &t, _state); - v->ptr.pp_double[i][j] = t; - } - } + *v = *v+1; } /************************************************************************* -Copy integer array +This function is used to decrement value of integer variable *************************************************************************/ -void copyintegerarray(/* Integer */ ae_vector* src, - /* Integer */ ae_vector* dst, - ae_state *_state) +void dec(ae_int_t* v, ae_state *_state) { - ae_int_t i; - ae_vector_clear(dst); - if( src->cnt>0 ) - { - ae_vector_set_length(dst, src->cnt, _state); - for(i=0; i<=src->cnt-1; i++) - { - dst->ptr.p_int[i] = src->ptr.p_int[i]; - } - } + *v = *v-1; } /************************************************************************* -Copy real array +This function is used to increment value of integer variable; name of the +function suggests that increment is done in multithreaded setting in the +thread-unsafe manner (optional progress reports which do not need guaranteed +correctness) *************************************************************************/ -void copyrealarray(/* Real */ ae_vector* src, - /* Real */ ae_vector* dst, - ae_state *_state) +void threadunsafeinc(ae_int_t* v, ae_state *_state) { - ae_int_t i; - ae_vector_clear(dst); - if( src->cnt>0 ) - { - ae_vector_set_length(dst, src->cnt, _state); - for(i=0; i<=src->cnt-1; i++) - { - dst->ptr.p_double[i] = src->ptr.p_double[i]; - } - } + *v = *v+1; } /************************************************************************* -Copy real matrix +This function is used to increment value of integer variable; name of the +function suggests that increment is done in multithreaded setting in the +thread-unsafe manner (optional progress reports which do not need guaranteed +correctness) *************************************************************************/ -void copyrealmatrix(/* Real */ ae_matrix* src, - /* Real */ ae_matrix* dst, - ae_state *_state) +void threadunsafeincby(ae_int_t* v, ae_int_t k, ae_state *_state) { - ae_int_t i; - ae_int_t j; - ae_matrix_clear(dst); - if( src->rows>0&&src->cols>0 ) - { - ae_matrix_set_length(dst, src->rows, src->cols, _state); - for(i=0; i<=src->rows-1; i++) - { - for(j=0; j<=src->cols-1; j++) - { - 
dst->ptr.pp_double[i][j] = src->ptr.pp_double[i][j]; - } - } - } + *v = *v+k; } /************************************************************************* -This function searches integer array. Elements in this array are actually -records, each NRec elements wide. Each record has unique header - NHeader -integer values, which identify it. Records are lexicographically sorted by -header. - -Records are identified by their index, not offset (offset = NRec*index). - -This function searches A (records with indices [I0,I1)) for a record with -header B. It returns index of this record (not offset!), or -1 on failure. - - -- ALGLIB -- - Copyright 28.03.2011 by Bochkanov Sergey +This function performs two operations: +1. decrements value of integer variable, if it is positive +2. explicitly sets variable to zero if it is non-positive +It is used by some algorithms to decrease value of internal counters. *************************************************************************/ -ae_int_t recsearch(/* Integer */ ae_vector* a, - ae_int_t nrec, - ae_int_t nheader, - ae_int_t i0, - ae_int_t i1, - /* Integer */ ae_vector* b, - ae_state *_state) +void countdown(ae_int_t* v, ae_state *_state) { - ae_int_t mididx; - ae_int_t cflag; - ae_int_t k; - ae_int_t offs; - ae_int_t result; - result = -1; - for(;;) + if( *v>0 ) { - if( i0>=i1 ) - { - break; - } - mididx = (i0+i1)/2; - offs = nrec*mididx; - cflag = 0; - for(k=0; k<=nheader-1; k++) - { - if( a->ptr.p_int[offs+k]ptr.p_int[k] ) - { - cflag = -1; - break; - } - if( a->ptr.p_int[offs+k]>b->ptr.p_int[k] ) - { - cflag = 1; - break; - } - } - if( cflag==0 ) - { - result = mididx; - return result; - } - if( cflag<0 ) - { - i0 = mididx+1; - } - else - { - i1 = mididx; - } + *v = *v-1; + } + else + { + *v = 0; } - return result; } /************************************************************************* -This function is used in parallel functions for recurrent division of large -task into two smaller tasks. - -It has following properties: -* it works only for TaskSize>=2 (assertion is thrown otherwise) -* for TaskSize=2, it returns Task0=1, Task1=1 -* in case TaskSize is odd, Task0=TaskSize-1, Task1=1 -* in case TaskSize is even, Task0 and Task1 are approximately TaskSize/2 - and both Task0 and Task1 are even, Task0>=Task1 - - -- ALGLIB -- - Copyright 07.04.2013 by Bochkanov Sergey +This function returns +1 or -1 depending on sign of X. +x=0 results in +1 being returned. *************************************************************************/ -void splitlengtheven(ae_int_t tasksize, - ae_int_t* task0, - ae_int_t* task1, - ae_state *_state) +double possign(double x, ae_state *_state) { + double result; - *task0 = 0; - *task1 = 0; - ae_assert(tasksize>=2, "SplitLengthEven: TaskSize<2", _state); - if( tasksize==2 ) - { - *task0 = 1; - *task1 = 1; - return; - } - if( tasksize%2==0 ) + if( ae_fp_greater_eq(x,(double)(0)) ) { - - /* - * Even division - */ - *task0 = tasksize/2; - *task1 = tasksize/2; - if( *task0%2!=0 ) - { - *task0 = *task0+1; - *task1 = *task1-1; - } + result = (double)(1); } else { - - /* - * Odd task size, split trailing odd part from it. - */ - *task0 = tasksize-1; - *task1 = 1; + result = (double)(-1); } - ae_assert(*task0>=1, "SplitLengthEven: internal error", _state); - ae_assert(*task1>=1, "SplitLengthEven: internal error", _state); + return result; } /************************************************************************* -This function is used in parallel functions for recurrent division of large -task into two smaller tasks. 
- -It has following properties: -* it works only for TaskSize>=2 and ChunkSize>=2 - (assertion is thrown otherwise) -* Task0+Task1=TaskSize, Task0>0, Task1>0 -* Task0 and Task1 are close to each other -* in case TaskSize>ChunkSize, Task0 is always divisible by ChunkSize - - -- ALGLIB -- - Copyright 07.04.2013 by Bochkanov Sergey +This function returns product of two real numbers. It is convenient when +you have to perform typecast-and-product of two INTEGERS. *************************************************************************/ -void splitlength(ae_int_t tasksize, - ae_int_t chunksize, - ae_int_t* task0, - ae_int_t* task1, - ae_state *_state) +double rmul2(double v0, double v1, ae_state *_state) { + double result; - *task0 = 0; - *task1 = 0; - ae_assert(chunksize>=2, "SplitLength: ChunkSize<2", _state); - ae_assert(tasksize>=2, "SplitLength: TaskSize<2", _state); - *task0 = tasksize/2; - if( *task0>chunksize&&*task0%chunksize!=0 ) - { - *task0 = *task0-*task0%chunksize; - } - *task1 = tasksize-(*task0); - ae_assert(*task0>=1, "SplitLength: internal error", _state); - ae_assert(*task1>=1, "SplitLength: internal error", _state); + result = v0*v1; + return result; } /************************************************************************* -This function is used to calculate number of chunks (including partial, -non-complete chunks) in some set. It expects that ChunkSize>=1, TaskSize>=0. -Assertion is thrown otherwise. +This function returns product of three real numbers. It is convenient when +you have to perform typecast-and-product of two INTEGERS. +*************************************************************************/ +double rmul3(double v0, double v1, double v2, ae_state *_state) +{ + double result; -Function result is equivalent to Ceil(TaskSize/ChunkSize), but with guarantees -that rounding errors won't ruin results. - -- ALGLIB -- - Copyright 21.01.2015 by Bochkanov Sergey + result = v0*v1*v2; + return result; +} + + +/************************************************************************* +This function returns (A div B) rounded up; it expects that A>0, B>0, but +does not check it. 
*************************************************************************/ -ae_int_t chunkscount(ae_int_t tasksize, - ae_int_t chunksize, - ae_state *_state) +ae_int_t idivup(ae_int_t a, ae_int_t b, ae_state *_state) { ae_int_t result; - ae_assert(tasksize>=0, "ChunksCount: TaskSize<0", _state); - ae_assert(chunksize>=1, "ChunksCount: ChunkSize<1", _state); - result = tasksize/chunksize; - if( tasksize%chunksize!=0 ) + result = a/b; + if( a%b>0 ) { result = result+1; } @@ -2230,4282 +2473,3145 @@ } -void _apbuffers_init(void* _p, ae_state *_state) +/************************************************************************* +This function returns min(i0,i1) +*************************************************************************/ +ae_int_t imin2(ae_int_t i0, ae_int_t i1, ae_state *_state) { - apbuffers *p = (apbuffers*)_p; - ae_touch_ptr((void*)p); - ae_vector_init(&p->ba0, 0, DT_BOOL, _state); - ae_vector_init(&p->ia0, 0, DT_INT, _state); - ae_vector_init(&p->ia1, 0, DT_INT, _state); - ae_vector_init(&p->ia2, 0, DT_INT, _state); - ae_vector_init(&p->ia3, 0, DT_INT, _state); - ae_vector_init(&p->ra0, 0, DT_REAL, _state); - ae_vector_init(&p->ra1, 0, DT_REAL, _state); - ae_vector_init(&p->ra2, 0, DT_REAL, _state); - ae_vector_init(&p->ra3, 0, DT_REAL, _state); - ae_matrix_init(&p->rm0, 0, 0, DT_REAL, _state); - ae_matrix_init(&p->rm1, 0, 0, DT_REAL, _state); -} + ae_int_t result; -void _apbuffers_init_copy(void* _dst, void* _src, ae_state *_state) -{ - apbuffers *dst = (apbuffers*)_dst; - apbuffers *src = (apbuffers*)_src; - ae_vector_init_copy(&dst->ba0, &src->ba0, _state); - ae_vector_init_copy(&dst->ia0, &src->ia0, _state); - ae_vector_init_copy(&dst->ia1, &src->ia1, _state); - ae_vector_init_copy(&dst->ia2, &src->ia2, _state); - ae_vector_init_copy(&dst->ia3, &src->ia3, _state); - ae_vector_init_copy(&dst->ra0, &src->ra0, _state); - ae_vector_init_copy(&dst->ra1, &src->ra1, _state); - ae_vector_init_copy(&dst->ra2, &src->ra2, _state); - ae_vector_init_copy(&dst->ra3, &src->ra3, _state); - ae_matrix_init_copy(&dst->rm0, &src->rm0, _state); - ae_matrix_init_copy(&dst->rm1, &src->rm1, _state); + result = i0; + if( i1ba0); - ae_vector_clear(&p->ia0); - ae_vector_clear(&p->ia1); - ae_vector_clear(&p->ia2); - ae_vector_clear(&p->ia3); - ae_vector_clear(&p->ra0); - ae_vector_clear(&p->ra1); - ae_vector_clear(&p->ra2); - ae_vector_clear(&p->ra3); - ae_matrix_clear(&p->rm0); - ae_matrix_clear(&p->rm1); -} + ae_int_t result; -void _apbuffers_destroy(void* _p) -{ - apbuffers *p = (apbuffers*)_p; - ae_touch_ptr((void*)p); - ae_vector_destroy(&p->ba0); - ae_vector_destroy(&p->ia0); - ae_vector_destroy(&p->ia1); - ae_vector_destroy(&p->ia2); - ae_vector_destroy(&p->ia3); - ae_vector_destroy(&p->ra0); - ae_vector_destroy(&p->ra1); - ae_vector_destroy(&p->ra2); - ae_vector_destroy(&p->ra3); - ae_matrix_destroy(&p->rm0); - ae_matrix_destroy(&p->rm1); + result = i0; + if( i1val = src->val; + result = i0; + if( i1>result ) + { + result = i1; + } + return result; } -void _sboolean_clear(void* _p) +/************************************************************************* +This function returns max(i0,i1,i2) +*************************************************************************/ +ae_int_t imax3(ae_int_t i0, ae_int_t i1, ae_int_t i2, ae_state *_state) { - sboolean *p = (sboolean*)_p; - ae_touch_ptr((void*)p); -} + ae_int_t result; -void _sboolean_destroy(void* _p) -{ - sboolean *p = (sboolean*)_p; - ae_touch_ptr((void*)p); -} - - -void _sbooleanarray_init(void* _p, ae_state *_state) -{ - sbooleanarray *p 
= (sbooleanarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_init(&p->val, 0, DT_BOOL, _state); + result = i0; + if( i1>result ) + { + result = i1; + } + if( i2>result ) + { + result = i2; + } + return result; } -void _sbooleanarray_init_copy(void* _dst, void* _src, ae_state *_state) +/************************************************************************* +This function returns max(r0,r1,r2) +*************************************************************************/ +double rmax3(double r0, double r1, double r2, ae_state *_state) { - sbooleanarray *dst = (sbooleanarray*)_dst; - sbooleanarray *src = (sbooleanarray*)_src; - ae_vector_init_copy(&dst->val, &src->val, _state); -} + double result; -void _sbooleanarray_clear(void* _p) -{ - sbooleanarray *p = (sbooleanarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_clear(&p->val); + result = r0; + if( ae_fp_greater(r1,result) ) + { + result = r1; + } + if( ae_fp_greater(r2,result) ) + { + result = r2; + } + return result; } -void _sbooleanarray_destroy(void* _p) +/************************************************************************* +This function returns max(|r0|,|r1|,|r2|) +*************************************************************************/ +double rmaxabs3(double r0, double r1, double r2, ae_state *_state) { - sbooleanarray *p = (sbooleanarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_destroy(&p->val); -} + double result; -void _sinteger_init(void* _p, ae_state *_state) -{ - sinteger *p = (sinteger*)_p; - ae_touch_ptr((void*)p); + r0 = ae_fabs(r0, _state); + r1 = ae_fabs(r1, _state); + r2 = ae_fabs(r2, _state); + result = r0; + if( ae_fp_greater(r1,result) ) + { + result = r1; + } + if( ae_fp_greater(r2,result) ) + { + result = r2; + } + return result; } -void _sinteger_init_copy(void* _dst, void* _src, ae_state *_state) -{ - sinteger *dst = (sinteger*)_dst; - sinteger *src = (sinteger*)_src; - dst->val = src->val; -} - +/************************************************************************* +'bounds' value: maps X to [B1,B2] -void _sinteger_clear(void* _p) + -- ALGLIB -- + Copyright 20.03.2009 by Bochkanov Sergey +*************************************************************************/ +double boundval(double x, double b1, double b2, ae_state *_state) { - sinteger *p = (sinteger*)_p; - ae_touch_ptr((void*)p); -} + double result; -void _sinteger_destroy(void* _p) -{ - sinteger *p = (sinteger*)_p; - ae_touch_ptr((void*)p); + if( ae_fp_less_eq(x,b1) ) + { + result = b1; + return result; + } + if( ae_fp_greater_eq(x,b2) ) + { + result = b2; + return result; + } + result = x; + return result; } -void _sintegerarray_init(void* _p, ae_state *_state) -{ - sintegerarray *p = (sintegerarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_init(&p->val, 0, DT_INT, _state); -} - +/************************************************************************* +'bounds' value: maps X to [B1,B2] -void _sintegerarray_init_copy(void* _dst, void* _src, ae_state *_state) + -- ALGLIB -- + Copyright 20.03.2009 by Bochkanov Sergey +*************************************************************************/ +ae_int_t iboundval(ae_int_t x, ae_int_t b1, ae_int_t b2, ae_state *_state) { - sintegerarray *dst = (sintegerarray*)_dst; - sintegerarray *src = (sintegerarray*)_src; - ae_vector_init_copy(&dst->val, &src->val, _state); -} + ae_int_t result; -void _sintegerarray_clear(void* _p) -{ - sintegerarray *p = (sintegerarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_clear(&p->val); + if( x<=b1 ) + { + result = b1; + return result; + } + if( x>=b2 ) + { + result 
= b2; + return result; + } + result = x; + return result; } -void _sintegerarray_destroy(void* _p) +/************************************************************************* +'bounds' value: maps X to [B1,B2] + + -- ALGLIB -- + Copyright 20.03.2009 by Bochkanov Sergey +*************************************************************************/ +double rboundval(double x, double b1, double b2, ae_state *_state) { - sintegerarray *p = (sintegerarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_destroy(&p->val); -} + double result; -void _sreal_init(void* _p, ae_state *_state) -{ - sreal *p = (sreal*)_p; - ae_touch_ptr((void*)p); + if( ae_fp_less_eq(x,b1) ) + { + result = b1; + return result; + } + if( ae_fp_greater_eq(x,b2) ) + { + result = b2; + return result; + } + result = x; + return result; } -void _sreal_init_copy(void* _dst, void* _src, ae_state *_state) +/************************************************************************* +Returns number of non-zeros +*************************************************************************/ +ae_int_t countnz1(/* Real */ ae_vector* v, + ae_int_t n, + ae_state *_state) { - sreal *dst = (sreal*)_dst; - sreal *src = (sreal*)_src; - dst->val = src->val; -} + ae_int_t i; + ae_int_t result; -void _sreal_clear(void* _p) -{ - sreal *p = (sreal*)_p; - ae_touch_ptr((void*)p); + result = 0; + for(i=0; i<=n-1; i++) + { + if( !(v->ptr.p_double[i]==0) ) + { + result = result+1; + } + } + return result; } -void _sreal_destroy(void* _p) +/************************************************************************* +Returns number of non-zeros +*************************************************************************/ +ae_int_t countnz2(/* Real */ ae_matrix* v, + ae_int_t m, + ae_int_t n, + ae_state *_state) { - sreal *p = (sreal*)_p; - ae_touch_ptr((void*)p); -} + ae_int_t i; + ae_int_t j; + ae_int_t result; -void _srealarray_init(void* _p, ae_state *_state) -{ - srealarray *p = (srealarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_init(&p->val, 0, DT_REAL, _state); + result = 0; + for(i=0; i<=m-1; i++) + { + for(j=0; j<=n-1; j++) + { + if( !(v->ptr.pp_double[i][j]==0) ) + { + result = result+1; + } + } + } + return result; } -void _srealarray_init_copy(void* _dst, void* _src, ae_state *_state) +/************************************************************************* +Allocation of serializer: complex value +*************************************************************************/ +void alloccomplex(ae_serializer* s, ae_complex v, ae_state *_state) { - srealarray *dst = (srealarray*)_dst; - srealarray *src = (srealarray*)_src; - ae_vector_init_copy(&dst->val, &src->val, _state); -} -void _srealarray_clear(void* _p) -{ - srealarray *p = (srealarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_clear(&p->val); + ae_serializer_alloc_entry(s); + ae_serializer_alloc_entry(s); } -void _srealarray_destroy(void* _p) +/************************************************************************* +Serialization: complex value +*************************************************************************/ +void serializecomplex(ae_serializer* s, ae_complex v, ae_state *_state) { - srealarray *p = (srealarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_destroy(&p->val); -} -void _scomplex_init(void* _p, ae_state *_state) -{ - scomplex *p = (scomplex*)_p; - ae_touch_ptr((void*)p); + ae_serializer_serialize_double(s, v.x, _state); + ae_serializer_serialize_double(s, v.y, _state); } -void _scomplex_init_copy(void* _dst, void* _src, ae_state *_state) 
+/************************************************************************* +Unserialization: complex value +*************************************************************************/ +ae_complex unserializecomplex(ae_serializer* s, ae_state *_state) { - scomplex *dst = (scomplex*)_dst; - scomplex *src = (scomplex*)_src; - dst->val = src->val; -} + ae_complex result; -void _scomplex_clear(void* _p) -{ - scomplex *p = (scomplex*)_p; - ae_touch_ptr((void*)p); + ae_serializer_unserialize_double(s, &result.x, _state); + ae_serializer_unserialize_double(s, &result.y, _state); + return result; } -void _scomplex_destroy(void* _p) +/************************************************************************* +Allocation of serializer: real array +*************************************************************************/ +void allocrealarray(ae_serializer* s, + /* Real */ ae_vector* v, + ae_int_t n, + ae_state *_state) { - scomplex *p = (scomplex*)_p; - ae_touch_ptr((void*)p); -} + ae_int_t i; -void _scomplexarray_init(void* _p, ae_state *_state) -{ - scomplexarray *p = (scomplexarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_init(&p->val, 0, DT_COMPLEX, _state); + if( n<0 ) + { + n = v->cnt; + } + ae_serializer_alloc_entry(s); + for(i=0; i<=n-1; i++) + { + ae_serializer_alloc_entry(s); + } } -void _scomplexarray_init_copy(void* _dst, void* _src, ae_state *_state) +/************************************************************************* +Serialization: complex value +*************************************************************************/ +void serializerealarray(ae_serializer* s, + /* Real */ ae_vector* v, + ae_int_t n, + ae_state *_state) { - scomplexarray *dst = (scomplexarray*)_dst; - scomplexarray *src = (scomplexarray*)_src; - ae_vector_init_copy(&dst->val, &src->val, _state); -} + ae_int_t i; -void _scomplexarray_clear(void* _p) -{ - scomplexarray *p = (scomplexarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_clear(&p->val); + if( n<0 ) + { + n = v->cnt; + } + ae_serializer_serialize_int(s, n, _state); + for(i=0; i<=n-1; i++) + { + ae_serializer_serialize_double(s, v->ptr.p_double[i], _state); + } } -void _scomplexarray_destroy(void* _p) +/************************************************************************* +Unserialization: complex value +*************************************************************************/ +void unserializerealarray(ae_serializer* s, + /* Real */ ae_vector* v, + ae_state *_state) { - scomplexarray *p = (scomplexarray*)_p; - ae_touch_ptr((void*)p); - ae_vector_destroy(&p->val); -} + ae_int_t n; + ae_int_t i; + double t; + ae_vector_clear(v); + ae_serializer_unserialize_int(s, &n, _state); + if( n==0 ) + { + return; + } + ae_vector_set_length(v, n, _state); + for(i=0; i<=n-1; i++) + { + ae_serializer_unserialize_double(s, &t, _state); + v->ptr.p_double[i] = t; + } +} -ae_int_t getrdfserializationcode(ae_state *_state) +/************************************************************************* +Allocation of serializer: Integer array +*************************************************************************/ +void allocintegerarray(ae_serializer* s, + /* Integer */ ae_vector* v, + ae_int_t n, + ae_state *_state) { - ae_int_t result; + ae_int_t i; - result = 1; - return result; + if( n<0 ) + { + n = v->cnt; + } + ae_serializer_alloc_entry(s); + for(i=0; i<=n-1; i++) + { + ae_serializer_alloc_entry(s); + } } -ae_int_t getkdtreeserializationcode(ae_state *_state) +/************************************************************************* +Serialization: Integer array 
+*************************************************************************/ +void serializeintegerarray(ae_serializer* s, + /* Integer */ ae_vector* v, + ae_int_t n, + ae_state *_state) { - ae_int_t result; + ae_int_t i; - result = 2; - return result; + if( n<0 ) + { + n = v->cnt; + } + ae_serializer_serialize_int(s, n, _state); + for(i=0; i<=n-1; i++) + { + ae_serializer_serialize_int(s, v->ptr.p_int[i], _state); + } } -ae_int_t getmlpserializationcode(ae_state *_state) +/************************************************************************* +Unserialization: complex value +*************************************************************************/ +void unserializeintegerarray(ae_serializer* s, + /* Integer */ ae_vector* v, + ae_state *_state) { - ae_int_t result; + ae_int_t n; + ae_int_t i; + ae_int_t t; + ae_vector_clear(v); - result = 3; - return result; + ae_serializer_unserialize_int(s, &n, _state); + if( n==0 ) + { + return; + } + ae_vector_set_length(v, n, _state); + for(i=0; i<=n-1; i++) + { + ae_serializer_unserialize_int(s, &t, _state); + v->ptr.p_int[i] = t; + } } -ae_int_t getmlpeserializationcode(ae_state *_state) +/************************************************************************* +Allocation of serializer: real matrix +*************************************************************************/ +void allocrealmatrix(ae_serializer* s, + /* Real */ ae_matrix* v, + ae_int_t n0, + ae_int_t n1, + ae_state *_state) { - ae_int_t result; + ae_int_t i; + ae_int_t j; - result = 4; - return result; + if( n0<0 ) + { + n0 = v->rows; + } + if( n1<0 ) + { + n1 = v->cols; + } + ae_serializer_alloc_entry(s); + ae_serializer_alloc_entry(s); + for(i=0; i<=n0-1; i++) + { + for(j=0; j<=n1-1; j++) + { + ae_serializer_alloc_entry(s); + } + } } -ae_int_t getrbfserializationcode(ae_state *_state) +/************************************************************************* +Serialization: complex value +*************************************************************************/ +void serializerealmatrix(ae_serializer* s, + /* Real */ ae_matrix* v, + ae_int_t n0, + ae_int_t n1, + ae_state *_state) { - ae_int_t result; + ae_int_t i; + ae_int_t j; - result = 5; - return result; + if( n0<0 ) + { + n0 = v->rows; + } + if( n1<0 ) + { + n1 = v->cols; + } + ae_serializer_serialize_int(s, n0, _state); + ae_serializer_serialize_int(s, n1, _state); + for(i=0; i<=n0-1; i++) + { + for(j=0; j<=n1-1; j++) + { + ae_serializer_serialize_double(s, v->ptr.pp_double[i][j], _state); + } + } } - - /************************************************************************* -This function sorts array of real keys by ascending. - -Its results are: -* sorted array A -* permutation tables P1, P2 - -Algorithm outputs permutation tables using two formats: -* as usual permutation of [0..N-1]. If P1[i]=j, then sorted A[i] contains - value which was moved there from J-th position. -* as a sequence of pairwise permutations. Sorted A[] may be obtained by - swaping A[i] and A[P2[i]] for all i from 0 to N-1. - -INPUT PARAMETERS: - A - unsorted array - N - array size - -OUPUT PARAMETERS: - A - sorted array - P1, P2 - permutation tables, array[N] - -NOTES: - this function assumes that A[] is finite; it doesn't checks that - condition. All other conditions (size of input arrays, etc.) are not - checked too. 
- - -- ALGLIB -- - Copyright 14.05.2008 by Bochkanov Sergey +Unserialization: complex value *************************************************************************/ -void tagsort(/* Real */ ae_vector* a, - ae_int_t n, - /* Integer */ ae_vector* p1, - /* Integer */ ae_vector* p2, +void unserializerealmatrix(ae_serializer* s, + /* Real */ ae_matrix* v, ae_state *_state) { - ae_frame _frame_block; - apbuffers buf; + ae_int_t i; + ae_int_t j; + ae_int_t n0; + ae_int_t n1; + double t; - ae_frame_make(_state, &_frame_block); - ae_vector_clear(p1); - ae_vector_clear(p2); - _apbuffers_init(&buf, _state); + ae_matrix_clear(v); - tagsortbuf(a, n, p1, p2, &buf, _state); - ae_frame_leave(_state); + ae_serializer_unserialize_int(s, &n0, _state); + ae_serializer_unserialize_int(s, &n1, _state); + if( n0==0||n1==0 ) + { + return; + } + ae_matrix_set_length(v, n0, n1, _state); + for(i=0; i<=n0-1; i++) + { + for(j=0; j<=n1-1; j++) + { + ae_serializer_unserialize_double(s, &t, _state); + v->ptr.pp_double[i][j] = t; + } + } } /************************************************************************* -Buffered variant of TagSort, which accepts preallocated output arrays as -well as special structure for buffered allocations. If arrays are too -short, they are reallocated. If they are large enough, no memory -allocation is done. - -It is intended to be used in the performance-critical parts of code, where -additional allocations can lead to severe performance degradation - - -- ALGLIB -- - Copyright 14.05.2008 by Bochkanov Sergey +Copy boolean array *************************************************************************/ -void tagsortbuf(/* Real */ ae_vector* a, - ae_int_t n, - /* Integer */ ae_vector* p1, - /* Integer */ ae_vector* p2, - apbuffers* buf, +void copybooleanarray(/* Boolean */ ae_vector* src, + /* Boolean */ ae_vector* dst, ae_state *_state) { ae_int_t i; - ae_int_t lv; - ae_int_t lp; - ae_int_t rv; - ae_int_t rp; + ae_vector_clear(dst); - - /* - * Special cases - */ - if( n<=0 ) - { - return; - } - if( n==1 ) - { - ivectorsetlengthatleast(p1, 1, _state); - ivectorsetlengthatleast(p2, 1, _state); - p1->ptr.p_int[0] = 0; - p2->ptr.p_int[0] = 0; - return; - } - - /* - * General case, N>1: prepare permutations table P1 - */ - ivectorsetlengthatleast(p1, n, _state); - for(i=0; i<=n-1; i++) - { - p1->ptr.p_int[i] = i; - } - - /* - * General case, N>1: sort, update P1 - */ - rvectorsetlengthatleast(&buf->ra0, n, _state); - ivectorsetlengthatleast(&buf->ia0, n, _state); - tagsortfasti(a, p1, &buf->ra0, &buf->ia0, n, _state); - - /* - * General case, N>1: fill permutations table P2 - * - * To fill P2 we maintain two arrays: - * * PV (Buf.IA0), Position(Value). PV[i] contains position of I-th key at the moment - * * VP (Buf.IA1), Value(Position). VP[i] contains key which has position I at the moment - * - * At each step we making permutation of two items: - * Left, which is given by position/value pair LP/LV - * and Right, which is given by RP/RV - * and updating PV[] and VP[] correspondingly. 
- */ - ivectorsetlengthatleast(&buf->ia0, n, _state); - ivectorsetlengthatleast(&buf->ia1, n, _state); - ivectorsetlengthatleast(p2, n, _state); - for(i=0; i<=n-1; i++) - { - buf->ia0.ptr.p_int[i] = i; - buf->ia1.ptr.p_int[i] = i; - } - for(i=0; i<=n-1; i++) + if( src->cnt>0 ) { - - /* - * calculate LP, LV, RP, RV - */ - lp = i; - lv = buf->ia1.ptr.p_int[lp]; - rv = p1->ptr.p_int[i]; - rp = buf->ia0.ptr.p_int[rv]; - - /* - * Fill P2 - */ - p2->ptr.p_int[i] = rp; - - /* - * update PV and VP - */ - buf->ia1.ptr.p_int[lp] = rv; - buf->ia1.ptr.p_int[rp] = lv; - buf->ia0.ptr.p_int[lv] = rp; - buf->ia0.ptr.p_int[rv] = lp; + ae_vector_set_length(dst, src->cnt, _state); + for(i=0; i<=src->cnt-1; i++) + { + dst->ptr.p_bool[i] = src->ptr.p_bool[i]; + } } } /************************************************************************* -Same as TagSort, but optimized for real keys and integer labels. - -A is sorted, and same permutations are applied to B. - -NOTES: -1. this function assumes that A[] is finite; it doesn't checks that - condition. All other conditions (size of input arrays, etc.) are not - checked too. -2. this function uses two buffers, BufA and BufB, each is N elements large. - They may be preallocated (which will save some time) or not, in which - case function will automatically allocate memory. - - -- ALGLIB -- - Copyright 11.12.2008 by Bochkanov Sergey +Copy integer array *************************************************************************/ -void tagsortfasti(/* Real */ ae_vector* a, - /* Integer */ ae_vector* b, - /* Real */ ae_vector* bufa, - /* Integer */ ae_vector* bufb, - ae_int_t n, +void copyintegerarray(/* Integer */ ae_vector* src, + /* Integer */ ae_vector* dst, ae_state *_state) { ae_int_t i; - ae_int_t j; - ae_bool isascending; - ae_bool isdescending; - double tmpr; - ae_int_t tmpi; + ae_vector_clear(dst); - - /* - * Special case - */ - if( n<=1 ) - { - return; - } - - /* - * Test for already sorted set - */ - isascending = ae_true; - isdescending = ae_true; - for(i=1; i<=n-1; i++) - { - isascending = isascending&&a->ptr.p_double[i]>=a->ptr.p_double[i-1]; - isdescending = isdescending&&a->ptr.p_double[i]<=a->ptr.p_double[i-1]; - } - if( isascending ) - { - return; - } - if( isdescending ) + if( src->cnt>0 ) { - for(i=0; i<=n-1; i++) + ae_vector_set_length(dst, src->cnt, _state); + for(i=0; i<=src->cnt-1; i++) { - j = n-1-i; - if( j<=i ) - { - break; - } - tmpr = a->ptr.p_double[i]; - a->ptr.p_double[i] = a->ptr.p_double[j]; - a->ptr.p_double[j] = tmpr; - tmpi = b->ptr.p_int[i]; - b->ptr.p_int[i] = b->ptr.p_int[j]; - b->ptr.p_int[j] = tmpi; + dst->ptr.p_int[i] = src->ptr.p_int[i]; } - return; - } - - /* - * General case - */ - if( bufa->cntcntcnt>0 ) + { + ae_vector_set_length(dst, src->cnt, _state); + for(i=0; i<=src->cnt-1; i++) + { + dst->ptr.p_double[i] = src->ptr.p_double[i]; + } + } +} - -- ALGLIB -- - Copyright 11.12.2008 by Bochkanov Sergey + +/************************************************************************* +Copy real matrix *************************************************************************/ -void tagsortfastr(/* Real */ ae_vector* a, - /* Real */ ae_vector* b, - /* Real */ ae_vector* bufa, - /* Real */ ae_vector* bufb, - ae_int_t n, +void copyrealmatrix(/* Real */ ae_matrix* src, + /* Real */ ae_matrix* dst, ae_state *_state) { ae_int_t i; ae_int_t j; - ae_bool isascending; - ae_bool isdescending; - double tmpr; + ae_matrix_clear(dst); - - /* - * Special case - */ - if( n<=1 ) - { - return; - } - - /* - * Test for already sorted set - */ - 
isascending = ae_true; - isdescending = ae_true; - for(i=1; i<=n-1; i++) - { - isascending = isascending&&a->ptr.p_double[i]>=a->ptr.p_double[i-1]; - isdescending = isdescending&&a->ptr.p_double[i]<=a->ptr.p_double[i-1]; - } - if( isascending ) - { - return; - } - if( isdescending ) + if( src->rows>0&&src->cols>0 ) { - for(i=0; i<=n-1; i++) + ae_matrix_set_length(dst, src->rows, src->cols, _state); + for(i=0; i<=src->rows-1; i++) { - j = n-1-i; - if( j<=i ) + for(j=0; j<=src->cols-1; j++) { - break; + dst->ptr.pp_double[i][j] = src->ptr.pp_double[i][j]; } - tmpr = a->ptr.p_double[i]; - a->ptr.p_double[i] = a->ptr.p_double[j]; - a->ptr.p_double[j] = tmpr; - tmpr = b->ptr.p_double[i]; - b->ptr.p_double[i] = b->ptr.p_double[j]; - b->ptr.p_double[j] = tmpr; } - return; } - - /* - * General case - */ - if( bufa->cntcnt=2 and TaskSize>TileSize (assertion is thrown otherwise) +* Task0+Task1=TaskSize, Task0>0, Task1>0 +* Task0 and Task1 are close to each other +* Task0>=Task1 +* Task0 is always divisible by TileSize -- ALGLIB -- - Copyright 11.12.2008 by Bochkanov Sergey + Copyright 07.04.2013 by Bochkanov Sergey *************************************************************************/ -void tagsortfast(/* Real */ ae_vector* a, - /* Real */ ae_vector* bufa, - ae_int_t n, +void tiledsplit(ae_int_t tasksize, + ae_int_t tilesize, + ae_int_t* task0, + ae_int_t* task1, ae_state *_state) { - ae_int_t i; - ae_int_t j; - ae_bool isascending; - ae_bool isdescending; - double tmpr; + ae_int_t cc; + *task0 = 0; + *task1 = 0; - - /* - * Special case - */ - if( n<=1 ) - { - return; - } - - /* - * Test for already sorted set - */ - isascending = ae_true; - isdescending = ae_true; - for(i=1; i<=n-1; i++) - { - isascending = isascending&&a->ptr.p_double[i]>=a->ptr.p_double[i-1]; - isdescending = isdescending&&a->ptr.p_double[i]<=a->ptr.p_double[i-1]; - } - if( isascending ) - { - return; - } - if( isdescending ) - { - for(i=0; i<=n-1; i++) - { - j = n-1-i; - if( j<=i ) - { - break; - } - tmpr = a->ptr.p_double[i]; - a->ptr.p_double[i] = a->ptr.p_double[j]; - a->ptr.p_double[j] = tmpr; - } - return; - } - - /* - * General case - */ - if( bufa->cnt=2, "TiledSplit: TaskSize<2", _state); + ae_assert(tasksize>tilesize, "TiledSplit: TaskSize<=TileSize", _state); + cc = chunkscount(tasksize, tilesize, _state); + ae_assert(cc>=2, "TiledSplit: integrity check failed", _state); + *task0 = idivup(cc, 2, _state)*tilesize; + *task1 = tasksize-(*task0); + ae_assert(*task0>=1, "TiledSplit: internal error", _state); + ae_assert(*task1>=1, "TiledSplit: internal error", _state); + ae_assert(*task0%tilesize==0, "TiledSplit: internal error", _state); + ae_assert(*task0>=(*task1), "TiledSplit: internal error", _state); } /************************************************************************* -Sorting function optimized for integer keys and real labels, can be used -to sort middle of the array +This function searches integer array. Elements in this array are actually +records, each NRec elements wide. Each record has unique header - NHeader +integer values, which identify it. Records are lexicographically sorted by +header. -A is sorted, and same permutations are applied to B. +Records are identified by their index, not offset (offset = NRec*index). -NOTES: - this function assumes that A[] is finite; it doesn't checks that - condition. All other conditions (size of input arrays, etc.) are not - checked too. +This function searches A (records with indices [I0,I1)) for a record with +header B. 
It returns index of this record (not offset!), or -1 on failure. -- ALGLIB -- - Copyright 11.12.2008 by Bochkanov Sergey + Copyright 28.03.2011 by Bochkanov Sergey *************************************************************************/ -void tagsortmiddleir(/* Integer */ ae_vector* a, - /* Real */ ae_vector* b, - ae_int_t offset, - ae_int_t n, +ae_int_t recsearch(/* Integer */ ae_vector* a, + ae_int_t nrec, + ae_int_t nheader, + ae_int_t i0, + ae_int_t i1, + /* Integer */ ae_vector* b, ae_state *_state) { - ae_int_t i; + ae_int_t mididx; + ae_int_t cflag; ae_int_t k; - ae_int_t t; - ae_int_t tmp; - double tmpr; + ae_int_t offs; + ae_int_t result; - - /* - * Special cases - */ - if( n<=1 ) - { - return; - } - - /* - * General case, N>1: sort, update B - */ - i = 2; - do + result = -1; + for(;;) { - t = i; - while(t!=1) + if( i0>=i1 ) { - k = t/2; - if( a->ptr.p_int[offset+k-1]>=a->ptr.p_int[offset+t-1] ) - { - t = 1; - } - else - { - tmp = a->ptr.p_int[offset+k-1]; - a->ptr.p_int[offset+k-1] = a->ptr.p_int[offset+t-1]; - a->ptr.p_int[offset+t-1] = tmp; - tmpr = b->ptr.p_double[offset+k-1]; - b->ptr.p_double[offset+k-1] = b->ptr.p_double[offset+t-1]; - b->ptr.p_double[offset+t-1] = tmpr; - t = k; - } + break; } - i = i+1; - } - while(i<=n); - i = n-1; - do - { - tmp = a->ptr.p_int[offset+i]; - a->ptr.p_int[offset+i] = a->ptr.p_int[offset+0]; - a->ptr.p_int[offset+0] = tmp; - tmpr = b->ptr.p_double[offset+i]; - b->ptr.p_double[offset+i] = b->ptr.p_double[offset+0]; - b->ptr.p_double[offset+0] = tmpr; - t = 1; - while(t!=0) + mididx = (i0+i1)/2; + offs = nrec*mididx; + cflag = 0; + for(k=0; k<=nheader-1; k++) { - k = 2*t; - if( k>i ) + if( a->ptr.p_int[offs+k]ptr.p_int[k] ) { - t = 0; + cflag = -1; + break; } - else + if( a->ptr.p_int[offs+k]>b->ptr.p_int[k] ) { - if( kptr.p_int[offset+k]>a->ptr.p_int[offset+k-1] ) - { - k = k+1; - } - } - if( a->ptr.p_int[offset+t-1]>=a->ptr.p_int[offset+k-1] ) - { - t = 0; - } - else - { - tmp = a->ptr.p_int[offset+k-1]; - a->ptr.p_int[offset+k-1] = a->ptr.p_int[offset+t-1]; - a->ptr.p_int[offset+t-1] = tmp; - tmpr = b->ptr.p_double[offset+k-1]; - b->ptr.p_double[offset+k-1] = b->ptr.p_double[offset+t-1]; - b->ptr.p_double[offset+t-1] = tmpr; - t = k; - } + cflag = 1; + break; } } - i = i-1; + if( cflag==0 ) + { + result = mididx; + return result; + } + if( cflag<0 ) + { + i0 = mididx+1; + } + else + { + i1 = mididx; + } } - while(i>=1); + return result; } /************************************************************************* -Heap operations: adds element to the heap +This function is used in parallel functions for recurrent division of large +task into two smaller tasks. -PARAMETERS: - A - heap itself, must be at least array[0..N] - B - array of integer tags, which are updated according to - permutations in the heap - N - size of the heap (without new element). 
- updated on output - VA - value of the element being added - VB - value of the tag +It has following properties: +* it works only for TaskSize>=2 (assertion is thrown otherwise) +* for TaskSize=2, it returns Task0=1, Task1=1 +* in case TaskSize is odd, Task0=TaskSize-1, Task1=1 +* in case TaskSize is even, Task0 and Task1 are approximately TaskSize/2 + and both Task0 and Task1 are even, Task0>=Task1 -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 07.04.2013 by Bochkanov Sergey *************************************************************************/ -void tagheappushi(/* Real */ ae_vector* a, - /* Integer */ ae_vector* b, - ae_int_t* n, - double va, - ae_int_t vb, +void splitlengtheven(ae_int_t tasksize, + ae_int_t* task0, + ae_int_t* task1, ae_state *_state) { - ae_int_t j; - ae_int_t k; - double v; + *task0 = 0; + *task1 = 0; - if( *n<0 ) + ae_assert(tasksize>=2, "SplitLengthEven: TaskSize<2", _state); + if( tasksize==2 ) { + *task0 = 1; + *task1 = 1; return; } - - /* - * N=0 is a special case - */ - if( *n==0 ) + if( tasksize%2==0 ) { - a->ptr.p_double[0] = va; - b->ptr.p_int[0] = vb; - *n = *n+1; - return; + + /* + * Even division + */ + *task0 = tasksize/2; + *task1 = tasksize/2; + if( *task0%2!=0 ) + { + *task0 = *task0+1; + *task1 = *task1-1; + } } - - /* - * add current point to the heap - * (add to the bottom, then move up) - * - * we don't write point to the heap - * until its final position is determined - * (it allow us to reduce number of array access operations) - */ - j = *n; - *n = *n+1; - while(j>0) + else { - k = (j-1)/2; - v = a->ptr.p_double[k]; - if( ae_fp_less(v,va) ) - { - - /* - * swap with higher element - */ - a->ptr.p_double[j] = v; - b->ptr.p_int[j] = b->ptr.p_int[k]; - j = k; - } - else - { - - /* - * element in its place. terminate. - */ - break; - } + + /* + * Odd task size, split trailing odd part from it. + */ + *task0 = tasksize-1; + *task1 = 1; } - a->ptr.p_double[j] = va; - b->ptr.p_int[j] = vb; + ae_assert(*task0>=1, "SplitLengthEven: internal error", _state); + ae_assert(*task1>=1, "SplitLengthEven: internal error", _state); } /************************************************************************* -Heap operations: replaces top element with new element -(which is moved down) +This function is used to calculate number of chunks (including partial, +non-complete chunks) in some set. It expects that ChunkSize>=1, TaskSize>=0. +Assertion is thrown otherwise. -PARAMETERS: - A - heap itself, must be at least array[0..N-1] - B - array of integer tags, which are updated according to - permutations in the heap - N - size of the heap - VA - value of the element which replaces top element - VB - value of the tag +Function result is equivalent to Ceil(TaskSize/ChunkSize), but with guarantees +that rounding errors won't ruin results. 
-- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 21.01.2015 by Bochkanov Sergey *************************************************************************/ -void tagheapreplacetopi(/* Real */ ae_vector* a, - /* Integer */ ae_vector* b, - ae_int_t n, - double va, - ae_int_t vb, +ae_int_t chunkscount(ae_int_t tasksize, + ae_int_t chunksize, ae_state *_state) { - ae_int_t j; - ae_int_t k1; - ae_int_t k2; - double v; - double v1; - double v2; + ae_int_t result; - if( n<1 ) - { - return; - } - - /* - * N=1 is a special case - */ - if( n==1 ) - { - a->ptr.p_double[0] = va; - b->ptr.p_int[0] = vb; - return; - } - - /* - * move down through heap: - * * J - current element - * * K1 - first child (always exists) - * * K2 - second child (may not exists) - * - * we don't write point to the heap - * until its final position is determined - * (it allow us to reduce number of array access operations) - */ - j = 0; - k1 = 1; - k2 = 2; - while(k1=0, "ChunksCount: TaskSize<0", _state); + ae_assert(chunksize>=1, "ChunksCount: ChunkSize<1", _state); + result = tasksize/chunksize; + if( tasksize%chunksize!=0 ) { - if( k2>=n ) - { - - /* - * only one child. - * - * swap and terminate (because this child - * have no siblings due to heap structure) - */ - v = a->ptr.p_double[k1]; - if( ae_fp_greater(v,va) ) - { - a->ptr.p_double[j] = v; - b->ptr.p_int[j] = b->ptr.p_int[k1]; - j = k1; - } - break; - } - else - { - - /* - * two childs - */ - v1 = a->ptr.p_double[k1]; - v2 = a->ptr.p_double[k2]; - if( ae_fp_greater(v1,v2) ) - { - if( ae_fp_less(va,v1) ) - { - a->ptr.p_double[j] = v1; - b->ptr.p_int[j] = b->ptr.p_int[k1]; - j = k1; - } - else - { - break; - } - } - else - { - if( ae_fp_less(va,v2) ) - { - a->ptr.p_double[j] = v2; - b->ptr.p_int[j] = b->ptr.p_int[k2]; - j = k2; - } - else - { - break; - } - } - k1 = 2*j+1; - k2 = 2*j+2; - } + result = result+1; } - a->ptr.p_double[j] = va; - b->ptr.p_int[j] = vb; + return result; } /************************************************************************* -Heap operations: pops top element from the heap - -PARAMETERS: - A - heap itself, must be at least array[0..N-1] - B - array of integer tags, which are updated according to - permutations in the heap - N - size of the heap, N>=1 - -On output top element is moved to A[N-1], B[N-1], heap is reordered, N is -decreased by 1. +Returns maximum density for level 2 sparse/dense functions. Density values +below one returned by this function are better to handle via sparse Level 2 +functionality. - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + -- ALGLIB routine -- + 10.01.2019 + Bochkanov Sergey *************************************************************************/ -void tagheappopi(/* Real */ ae_vector* a, - /* Integer */ ae_vector* b, - ae_int_t* n, - ae_state *_state) +double sparselevel2density(ae_state *_state) { - double va; - ae_int_t vb; + double result; - if( *n<1 ) - { - return; - } - - /* - * N=1 is a special case - */ - if( *n==1 ) - { - *n = 0; - return; - } - - /* - * swap top element and last element, - * then reorder heap - */ - va = a->ptr.p_double[*n-1]; - vb = b->ptr.p_int[*n-1]; - a->ptr.p_double[*n-1] = a->ptr.p_double[0]; - b->ptr.p_int[*n-1] = b->ptr.p_int[0]; - *n = *n-1; - tagheapreplacetopi(a, b, *n, va, vb, _state); + result = 0.1; + return result; } /************************************************************************* -Search first element less than T in sorted array. +Returns A-tile size for a matrix. 
-PARAMETERS: - A - sorted array by ascending from 0 to N-1 - N - number of elements in array - T - the desired element +A-tiles are smallest tiles (32x32), suitable for processing by ALGLIB own +implementation of Level 3 linear algebra. -RESULT: - The very first element's index, which isn't less than T. -In the case when there aren't such elements, returns N. + -- ALGLIB routine -- + 10.01.2019 + Bochkanov Sergey *************************************************************************/ -ae_int_t lowerbound(/* Real */ ae_vector* a, - ae_int_t n, - double t, - ae_state *_state) +ae_int_t matrixtilesizea(ae_state *_state) { - ae_int_t l; - ae_int_t half; - ae_int_t first; - ae_int_t middle; ae_int_t result; - l = n; - first = 0; - while(l>0) - { - half = l/2; - middle = first+half; - if( ae_fp_less(a->ptr.p_double[middle],t) ) - { - first = middle+1; - l = l-half-1; - } - else - { - l = half; - } - } - result = first; + result = 32; return result; } /************************************************************************* -Search first element more than T in sorted array. +Returns B-tile size for a matrix. -PARAMETERS: - A - sorted array by ascending from 0 to N-1 - N - number of elements in array - T - the desired element +B-tiles are larger tiles (64x64), suitable for parallel execution or for +processing by vendor's implementation of Level 3 linear algebra. - RESULT: - The very first element's index, which more than T. -In the case when there aren't such elements, returns N. + -- ALGLIB routine -- + 10.01.2019 + Bochkanov Sergey *************************************************************************/ -ae_int_t upperbound(/* Real */ ae_vector* a, - ae_int_t n, - double t, - ae_state *_state) +ae_int_t matrixtilesizeb(ae_state *_state) { - ae_int_t l; - ae_int_t half; - ae_int_t first; - ae_int_t middle; +#ifndef ALGLIB_INTERCEPTS_MKL ae_int_t result; - l = n; - first = 0; - while(l>0) - { - half = l/2; - middle = first+half; - if( ae_fp_less(t,a->ptr.p_double[middle]) ) - { - l = half; - } - else - { - first = middle+1; - l = l-half-1; - } - } - result = first; + result = 64; return result; +#else + return _ialglib_i_matrixtilesizeb(); +#endif } /************************************************************************* -Internal TagSortFastI: sorts A[I1...I2] (both bounds are included), -applies same permutations to B. +This function returns minimum cost of task which is feasible for +multithreaded processing. It returns real number in order to avoid overflow +problems. -- ALGLIB -- - Copyright 06.09.2010 by Bochkanov Sergey + Copyright 10.01.2018 by Bochkanov Sergey *************************************************************************/ -static void tsort_tagsortfastirec(/* Real */ ae_vector* a, - /* Integer */ ae_vector* b, - /* Real */ ae_vector* bufa, - /* Integer */ ae_vector* bufb, +double smpactivationlevel(ae_state *_state) +{ + double nn; + double result; + + + nn = (double)(2*matrixtilesizeb(_state)); + result = ae_maxreal(0.95*2*nn*nn*nn, 1.0E7, _state); + return result; +} + + +/************************************************************************* +This function returns minimum cost of task which is feasible for +spawn (given that multithreading is active). + +It returns real number in order to avoid overflow problems. 
+ + -- ALGLIB -- + Copyright 10.01.2018 by Bochkanov Sergey +*************************************************************************/ +double spawnlevel(ae_state *_state) +{ + double nn; + double result; + + + nn = (double)(2*matrixtilesizea(_state)); + result = 0.95*2*nn*nn*nn; + return result; +} + + +/************************************************************************* +--- OBSOLETE FUNCTION, USE TILED SPLIT INSTEAD --- + +This function is used in parallel functions for recurrent division of large +task into two smaller tasks. + +It has following properties: +* it works only for TaskSize>=2 and ChunkSize>=2 + (assertion is thrown otherwise) +* Task0+Task1=TaskSize, Task0>0, Task1>0 +* Task0 and Task1 are close to each other +* in case TaskSize>ChunkSize, Task0 is always divisible by ChunkSize + + -- ALGLIB -- + Copyright 07.04.2013 by Bochkanov Sergey +*************************************************************************/ +void splitlength(ae_int_t tasksize, + ae_int_t chunksize, + ae_int_t* task0, + ae_int_t* task1, + ae_state *_state) +{ + + *task0 = 0; + *task1 = 0; + + ae_assert(chunksize>=2, "SplitLength: ChunkSize<2", _state); + ae_assert(tasksize>=2, "SplitLength: TaskSize<2", _state); + *task0 = tasksize/2; + if( *task0>chunksize&&*task0%chunksize!=0 ) + { + *task0 = *task0-*task0%chunksize; + } + *task1 = tasksize-(*task0); + ae_assert(*task0>=1, "SplitLength: internal error", _state); + ae_assert(*task1>=1, "SplitLength: internal error", _state); +} + + +/************************************************************************* +Outputs vector A[I0,I1-1] to trace log using either: +a) 6-digit exponential format (no trace flags is set) +b) 15-ditit exponential format ('PREC.E15' trace flag is set) +b) 6-ditit fixed-point format ('PREC.F6' trace flag is set) + +This function checks trace flags every time it is called. +*************************************************************************/ +void tracevectorautoprec(/* Real */ ae_vector* a, + ae_int_t i0, ae_int_t i1, - ae_int_t i2, ae_state *_state) { ae_int_t i; - ae_int_t j; - ae_int_t k; - ae_int_t cntless; - ae_int_t cnteq; - ae_int_t cntgreater; - double tmpr; - ae_int_t tmpi; - double v0; - double v1; - double v2; - double vp; + ae_int_t prectouse; /* - * Fast exit + * Determine precision to use */ - if( i2<=i1 ) + prectouse = 0; + if( ae_is_trace_enabled("PREC.E15") ) { - return; + prectouse = 1; + } + if( ae_is_trace_enabled("PREC.F6") ) + { + prectouse = 2; } /* - * Non-recursive sort for small arrays + * Output */ - if( i2-i1<=16 ) + ae_trace("[ "); + for(i=i0; i<=i1-1; i++) { - for(j=i1+1; j<=i2; j++) + if( prectouse==0 ) { - - /* - * Search elements [I1..J-1] for place to insert Jth element. 
- * - * This code stops immediately if we can leave A[J] at J-th position - * (all elements have same value of A[J] larger than any of them) - */ - tmpr = a->ptr.p_double[j]; - tmpi = j; - for(k=j-1; k>=i1; k--) - { - if( a->ptr.p_double[k]<=tmpr ) - { - break; - } - tmpi = k; - } - k = tmpi; - - /* - * Insert Jth element into Kth position - */ - if( k!=j ) - { - tmpr = a->ptr.p_double[j]; - tmpi = b->ptr.p_int[j]; - for(i=j-1; i>=k; i--) - { - a->ptr.p_double[i+1] = a->ptr.p_double[i]; - b->ptr.p_int[i+1] = b->ptr.p_int[i]; - } - a->ptr.p_double[k] = tmpr; - b->ptr.p_int[k] = tmpi; - } + ae_trace("%14.6e", + (double)(a->ptr.p_double[i])); + } + if( prectouse==1 ) + { + ae_trace("%23.15e", + (double)(a->ptr.p_double[i])); + } + if( prectouse==2 ) + { + ae_trace("%13.6f", + (double)(a->ptr.p_double[i])); + } + if( i=2 + * Determine precision to use */ - v0 = a->ptr.p_double[i1]; - v1 = a->ptr.p_double[i1+(i2-i1)/2]; - v2 = a->ptr.p_double[i2]; - if( v0>v1 ) - { - tmpr = v1; - v1 = v0; - v0 = tmpr; - } - if( v1>v2 ) + prectouse = 0; + if( ae_is_trace_enabled("PREC.E15") ) { - tmpr = v2; - v2 = v1; - v1 = tmpr; + prectouse = 1; } - if( v0>v1 ) + if( ae_is_trace_enabled("PREC.F6") ) { - tmpr = v1; - v1 = v0; - v0 = tmpr; + prectouse = 2; } - vp = v1; /* - * now pass through A/B and: - * * move elements that are LESS than VP to the left of A/B - * * move elements that are EQUAL to VP to the right of BufA/BufB (in the reverse order) - * * move elements that are GREATER than VP to the left of BufA/BufB (in the normal order - * * move elements from the tail of BufA/BufB to the middle of A/B (restoring normal order) - * * move elements from the left of BufA/BufB to the end of A/B + * Output */ - cntless = 0; - cnteq = 0; - cntgreater = 0; - for(i=i1; i<=i2; i++) + ae_trace("[ "); + for(i=0; i<=n-1; i++) { - v0 = a->ptr.p_double[i]; - if( v0ptr.p_double[i]; + if( applyscl ) { - - /* - * LESS - */ - k = i1+cntless; - if( i!=k ) - { - a->ptr.p_double[k] = v0; - b->ptr.p_int[k] = b->ptr.p_int[i]; - } - cntless = cntless+1; - continue; + v = v*scl->ptr.p_double[i]; } - if( v0==vp ) + if( applysft ) { - - /* - * EQUAL - */ - k = i2-cnteq; - bufa->ptr.p_double[k] = v0; - bufb->ptr.p_int[k] = b->ptr.p_int[i]; - cnteq = cnteq+1; - continue; + v = v+sft->ptr.p_double[i]; + } + if( prectouse==0 ) + { + ae_trace("%14.6e", + (double)(v)); + } + if( prectouse==1 ) + { + ae_trace("%23.15e", + (double)(v)); + } + if( prectouse==2 ) + { + ae_trace("%13.6f", + (double)(v)); + } + if( iptr.p_double[k] = v0; - bufb->ptr.p_int[k] = b->ptr.p_int[i]; - cntgreater = cntgreater+1; - } - for(i=0; i<=cnteq-1; i++) - { - j = i1+cntless+cnteq-1-i; - k = i2+i-(cnteq-1); - a->ptr.p_double[j] = bufa->ptr.p_double[k]; - b->ptr.p_int[j] = bufb->ptr.p_int[k]; - } - for(i=0; i<=cntgreater-1; i++) - { - j = i1+cntless+cnteq+i; - k = i1+i; - a->ptr.p_double[j] = bufa->ptr.p_double[k]; - b->ptr.p_int[j] = bufb->ptr.p_int[k]; } - - /* - * Sort left and right parts of the array (ignoring middle part) - */ - tsort_tagsortfastirec(a, b, bufa, bufb, i1, i1+cntless-1, _state); - tsort_tagsortfastirec(a, b, bufa, bufb, i1+cntless+cnteq, i2, _state); + ae_trace(" ]"); } /************************************************************************* -Internal TagSortFastR: sorts A[I1...I2] (both bounds are included), -applies same permutations to B. 
+Outputs vector of 1-norms of rows [I0,I1-1] of A[I0...I1-1,J0...J1-1] to +trace log using either: +a) 6-digit exponential format (no trace flags is set) +b) 15-ditit exponential format ('PREC.E15' trace flag is set) +b) 6-ditit fixed-point format ('PREC.F6' trace flag is set) - -- ALGLIB -- - Copyright 06.09.2010 by Bochkanov Sergey +This function checks trace flags every time it is called. *************************************************************************/ -static void tsort_tagsortfastrrec(/* Real */ ae_vector* a, - /* Real */ ae_vector* b, - /* Real */ ae_vector* bufa, - /* Real */ ae_vector* bufb, +void tracerownrm1autoprec(/* Real */ ae_matrix* a, + ae_int_t i0, ae_int_t i1, - ae_int_t i2, + ae_int_t j0, + ae_int_t j1, ae_state *_state) { ae_int_t i; ae_int_t j; - ae_int_t k; - double tmpr; - double tmpr2; - ae_int_t tmpi; - ae_int_t cntless; - ae_int_t cnteq; - ae_int_t cntgreater; - double v0; - double v1; - double v2; - double vp; + double v; + ae_int_t prectouse; /* - * Fast exit - */ - if( i2<=i1 ) - { - return; - } - - /* - * Non-recursive sort for small arrays - */ - if( i2-i1<=16 ) - { - for(j=i1+1; j<=i2; j++) - { - - /* - * Search elements [I1..J-1] for place to insert Jth element. - * - * This code stops immediatly if we can leave A[J] at J-th position - * (all elements have same value of A[J] larger than any of them) - */ - tmpr = a->ptr.p_double[j]; - tmpi = j; - for(k=j-1; k>=i1; k--) - { - if( a->ptr.p_double[k]<=tmpr ) - { - break; - } - tmpi = k; - } - k = tmpi; - - /* - * Insert Jth element into Kth position - */ - if( k!=j ) - { - tmpr = a->ptr.p_double[j]; - tmpr2 = b->ptr.p_double[j]; - for(i=j-1; i>=k; i--) - { - a->ptr.p_double[i+1] = a->ptr.p_double[i]; - b->ptr.p_double[i+1] = b->ptr.p_double[i]; - } - a->ptr.p_double[k] = tmpr; - b->ptr.p_double[k] = tmpr2; - } - } - return; - } - - /* - * Quicksort: choose pivot - * Here we assume that I2-I1>=16 + * Determine precision to use */ - v0 = a->ptr.p_double[i1]; - v1 = a->ptr.p_double[i1+(i2-i1)/2]; - v2 = a->ptr.p_double[i2]; - if( v0>v1 ) - { - tmpr = v1; - v1 = v0; - v0 = tmpr; - } - if( v1>v2 ) + prectouse = 0; + if( ae_is_trace_enabled("PREC.E15") ) { - tmpr = v2; - v2 = v1; - v1 = tmpr; + prectouse = 1; } - if( v0>v1 ) + if( ae_is_trace_enabled("PREC.F6") ) { - tmpr = v1; - v1 = v0; - v0 = tmpr; + prectouse = 2; } - vp = v1; /* - * now pass through A/B and: - * * move elements that are LESS than VP to the left of A/B - * * move elements that are EQUAL to VP to the right of BufA/BufB (in the reverse order) - * * move elements that are GREATER than VP to the left of BufA/BufB (in the normal order - * * move elements from the tail of BufA/BufB to the middle of A/B (restoring normal order) - * * move elements from the left of BufA/BufB to the end of A/B + * Output */ - cntless = 0; - cnteq = 0; - cntgreater = 0; - for(i=i1; i<=i2; i++) + ae_trace("[ "); + for(i=i0; i<=i1-1; i++) { - v0 = a->ptr.p_double[i]; - if( v0ptr.p_double[k] = v0; - b->ptr.p_double[k] = b->ptr.p_double[i]; - } - cntless = cntless+1; - continue; + v = ae_maxreal(v, ae_fabs(a->ptr.pp_double[i][j], _state), _state); } - if( v0==vp ) + if( prectouse==0 ) { - - /* - * EQUAL - */ - k = i2-cnteq; - bufa->ptr.p_double[k] = v0; - bufb->ptr.p_double[k] = b->ptr.p_double[i]; - cnteq = cnteq+1; - continue; + ae_trace("%14.6e", + (double)(v)); + } + if( prectouse==1 ) + { + ae_trace("%23.15e", + (double)(v)); + } + if( prectouse==2 ) + { + ae_trace("%13.6f", + (double)(v)); + } + if( iptr.p_double[k] = v0; - bufb->ptr.p_double[k] = 
b->ptr.p_double[i]; - cntgreater = cntgreater+1; } - for(i=0; i<=cnteq-1; i++) + ae_trace(" ]"); +} + + +/************************************************************************* +Outputs vector A[I0,I1-1] to trace log using E8 precision +*************************************************************************/ +void tracevectore6(/* Real */ ae_vector* a, + ae_int_t i0, + ae_int_t i1, + ae_state *_state) +{ + ae_int_t i; + + + ae_trace("[ "); + for(i=i0; i<=i1-1; i++) { - j = i1+cntless+cnteq-1-i; - k = i2+i-(cnteq-1); - a->ptr.p_double[j] = bufa->ptr.p_double[k]; - b->ptr.p_double[j] = bufb->ptr.p_double[k]; + ae_trace("%14.6e", + (double)(a->ptr.p_double[i])); + if( iptr.p_double[j] = bufa->ptr.p_double[k]; - b->ptr.p_double[j] = bufb->ptr.p_double[k]; + if( usee15 ) + { + ae_trace("%23.15e", + (double)(a->ptr.p_double[i])); + } + else + { + ae_trace("%14.6e", + (double)(a->ptr.p_double[i])); + } + if( iptr.p_double[j]; - tmpi = j; - for(k=j-1; k>=i1; k--) - { - if( a->ptr.p_double[k]<=tmpr ) - { - break; - } - tmpi = k; - } - k = tmpi; - - /* - * Insert Jth element into Kth position - */ - if( k!=j ) - { - tmpr = a->ptr.p_double[j]; - for(i=j-1; i>=k; i--) - { - a->ptr.p_double[i+1] = a->ptr.p_double[i]; - } - a->ptr.p_double[k] = tmpr; - } + v = ae_maxreal(v, ae_fabs(a->ptr.pp_double[i][j], _state), _state); } - return; - } - - /* - * Quicksort: choose pivot - * Here we assume that I2-I1>=16 - */ - v0 = a->ptr.p_double[i1]; - v1 = a->ptr.p_double[i1+(i2-i1)/2]; - v2 = a->ptr.p_double[i2]; - if( v0>v1 ) - { - tmpr = v1; - v1 = v0; - v0 = tmpr; - } - if( v1>v2 ) - { - tmpr = v2; - v2 = v1; - v1 = tmpr; - } - if( v0>v1 ) - { - tmpr = v1; - v1 = v0; - v0 = tmpr; - } - vp = v1; - - /* - * now pass through A/B and: - * * move elements that are LESS than VP to the left of A/B - * * move elements that are EQUAL to VP to the right of BufA/BufB (in the reverse order) - * * move elements that are GREATER than VP to the left of BufA/BufB (in the normal order - * * move elements from the tail of BufA/BufB to the middle of A/B (restoring normal order) - * * move elements from the left of BufA/BufB to the end of A/B - */ - cntless = 0; - cnteq = 0; - cntgreater = 0; - for(i=i1; i<=i2; i++) - { - v0 = a->ptr.p_double[i]; - if( v0ptr.p_double[k] = v0; - } - cntless = cntless+1; - continue; - } - if( v0==vp ) + ae_trace("%14.6e", + (double)(v)); + if( iptr.p_double[k] = v0; - cnteq = cnteq+1; - continue; + ae_trace(" "); } - - /* - * GREATER - */ - k = i1+cntgreater; - bufa->ptr.p_double[k] = v0; - cntgreater = cntgreater+1; - } - for(i=0; i<=cnteq-1; i++) - { - j = i1+cntless+cnteq-1-i; - k = i2+i-(cnteq-1); - a->ptr.p_double[j] = bufa->ptr.p_double[k]; - } - for(i=0; i<=cntgreater-1; i++) - { - j = i1+cntless+cnteq+i; - k = i1+i; - a->ptr.p_double[j] = bufa->ptr.p_double[k]; } - - /* - * Sort left and right parts of the array (ignoring middle part) - */ - tsort_tagsortfastrec(a, bufa, i1, i1+cntless-1, _state); - tsort_tagsortfastrec(a, bufa, i1+cntless+cnteq, i2, _state); + ae_trace(" ]"); } +void _apbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic) +{ + apbuffers *p = (apbuffers*)_p; + ae_touch_ptr((void*)p); + ae_vector_init(&p->ba0, 0, DT_BOOL, _state, make_automatic); + ae_vector_init(&p->ia0, 0, DT_INT, _state, make_automatic); + ae_vector_init(&p->ia1, 0, DT_INT, _state, make_automatic); + ae_vector_init(&p->ia2, 0, DT_INT, _state, make_automatic); + ae_vector_init(&p->ia3, 0, DT_INT, _state, make_automatic); + ae_vector_init(&p->ra0, 0, DT_REAL, _state, make_automatic); + 
ae_vector_init(&p->ra1, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->ra2, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->ra3, 0, DT_REAL, _state, make_automatic); + ae_matrix_init(&p->rm0, 0, 0, DT_REAL, _state, make_automatic); + ae_matrix_init(&p->rm1, 0, 0, DT_REAL, _state, make_automatic); +} -/************************************************************************* -Internal ranking subroutine. +void _apbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + apbuffers *dst = (apbuffers*)_dst; + apbuffers *src = (apbuffers*)_src; + ae_vector_init_copy(&dst->ba0, &src->ba0, _state, make_automatic); + ae_vector_init_copy(&dst->ia0, &src->ia0, _state, make_automatic); + ae_vector_init_copy(&dst->ia1, &src->ia1, _state, make_automatic); + ae_vector_init_copy(&dst->ia2, &src->ia2, _state, make_automatic); + ae_vector_init_copy(&dst->ia3, &src->ia3, _state, make_automatic); + ae_vector_init_copy(&dst->ra0, &src->ra0, _state, make_automatic); + ae_vector_init_copy(&dst->ra1, &src->ra1, _state, make_automatic); + ae_vector_init_copy(&dst->ra2, &src->ra2, _state, make_automatic); + ae_vector_init_copy(&dst->ra3, &src->ra3, _state, make_automatic); + ae_matrix_init_copy(&dst->rm0, &src->rm0, _state, make_automatic); + ae_matrix_init_copy(&dst->rm1, &src->rm1, _state, make_automatic); +} -INPUT PARAMETERS: - X - array to rank - N - array size - IsCentered- whether ranks are centered or not: - * True - ranks are centered in such way that their - sum is zero - * False - ranks are not centered - Buf - temporary buffers - -NOTE: when IsCentered is True and all X[] are equal, this function fills - X by zeros (exact zeros are used, not sum which is only approximately - equal to zero). -*************************************************************************/ -void rankx(/* Real */ ae_vector* x, - ae_int_t n, - ae_bool iscentered, - apbuffers* buf, - ae_state *_state) + +void _apbuffers_clear(void* _p) { - ae_int_t i; - ae_int_t j; - ae_int_t k; - double tmp; - double voffs; + apbuffers *p = (apbuffers*)_p; + ae_touch_ptr((void*)p); + ae_vector_clear(&p->ba0); + ae_vector_clear(&p->ia0); + ae_vector_clear(&p->ia1); + ae_vector_clear(&p->ia2); + ae_vector_clear(&p->ia3); + ae_vector_clear(&p->ra0); + ae_vector_clear(&p->ra1); + ae_vector_clear(&p->ra2); + ae_vector_clear(&p->ra3); + ae_matrix_clear(&p->rm0); + ae_matrix_clear(&p->rm1); +} - - /* - * Prepare - */ - if( n<1 ) - { - return; - } - if( n==1 ) - { - x->ptr.p_double[0] = (double)(0); - return; - } - if( buf->ra1.cntra1, n, _state); - } - if( buf->ia1.cntia1, n, _state); - } - for(i=0; i<=n-1; i++) - { - buf->ra1.ptr.p_double[i] = x->ptr.p_double[i]; - buf->ia1.ptr.p_int[i] = i; - } - tagsortfasti(&buf->ra1, &buf->ia1, &buf->ra2, &buf->ia2, n, _state); - - /* - * Special test for all values being equal - */ - if( ae_fp_eq(buf->ra1.ptr.p_double[0],buf->ra1.ptr.p_double[n-1]) ) - { - if( iscentered ) - { - tmp = 0.0; - } - else - { - tmp = (double)(n-1)/(double)2; - } - for(i=0; i<=n-1; i++) - { - x->ptr.p_double[i] = tmp; - } - return; - } - - /* - * compute tied ranks - */ - i = 0; - while(i<=n-1) - { - j = i+1; - while(j<=n-1) - { - if( ae_fp_neq(buf->ra1.ptr.p_double[j],buf->ra1.ptr.p_double[i]) ) - { - break; - } - j = j+1; - } - for(k=i; k<=j-1; k++) - { - buf->ra1.ptr.p_double[k] = (double)(i+j-1)/(double)2; - } - i = j; - } - - /* - * back to x - */ - if( iscentered ) - { - voffs = (double)(n-1)/(double)2; - } - else - { - voffs = 0.0; - } - for(i=0; i<=n-1; i++) - { - 
x->ptr.p_double[buf->ia1.ptr.p_int[i]] = buf->ra1.ptr.p_double[i]-voffs; - } +void _apbuffers_destroy(void* _p) +{ + apbuffers *p = (apbuffers*)_p; + ae_touch_ptr((void*)p); + ae_vector_destroy(&p->ba0); + ae_vector_destroy(&p->ia0); + ae_vector_destroy(&p->ia1); + ae_vector_destroy(&p->ia2); + ae_vector_destroy(&p->ia3); + ae_vector_destroy(&p->ra0); + ae_vector_destroy(&p->ra1); + ae_vector_destroy(&p->ra2); + ae_vector_destroy(&p->ra3); + ae_matrix_destroy(&p->rm0); + ae_matrix_destroy(&p->rm1); } +void _sboolean_init(void* _p, ae_state *_state, ae_bool make_automatic) +{ + sboolean *p = (sboolean*)_p; + ae_touch_ptr((void*)p); +} -/************************************************************************* -Fast kernel +void _sboolean_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + sboolean *dst = (sboolean*)_dst; + sboolean *src = (sboolean*)_src; + dst->val = src->val; +} - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool cmatrixrank1f(ae_int_t m, - ae_int_t n, - /* Complex */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - /* Complex */ ae_vector* u, - ae_int_t iu, - /* Complex */ ae_vector* v, - ae_int_t iv, - ae_state *_state) + +void _sboolean_clear(void* _p) { -#ifndef ALGLIB_INTERCEPTS_ABLAS - ae_bool result; + sboolean *p = (sboolean*)_p; + ae_touch_ptr((void*)p); +} - result = ae_false; - return result; -#else - return _ialglib_i_cmatrixrank1f(m, n, a, ia, ja, u, iu, v, iv); -#endif +void _sboolean_destroy(void* _p) +{ + sboolean *p = (sboolean*)_p; + ae_touch_ptr((void*)p); } -/************************************************************************* -Fast kernel - - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool rmatrixrank1f(ae_int_t m, - ae_int_t n, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - /* Real */ ae_vector* u, - ae_int_t iu, - /* Real */ ae_vector* v, - ae_int_t iv, - ae_state *_state) +void _sbooleanarray_init(void* _p, ae_state *_state, ae_bool make_automatic) { -#ifndef ALGLIB_INTERCEPTS_ABLAS - ae_bool result; + sbooleanarray *p = (sbooleanarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_init(&p->val, 0, DT_BOOL, _state, make_automatic); +} - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixrank1f(m, n, a, ia, ja, u, iu, v, iv); -#endif +void _sbooleanarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + sbooleanarray *dst = (sbooleanarray*)_dst; + sbooleanarray *src = (sbooleanarray*)_src; + ae_vector_init_copy(&dst->val, &src->val, _state, make_automatic); } -/************************************************************************* -Fast kernel - - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool cmatrixmvf(ae_int_t m, - ae_int_t n, - /* Complex */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t opa, - /* Complex */ ae_vector* x, - ae_int_t ix, - /* Complex */ ae_vector* y, - ae_int_t iy, - ae_state *_state) +void _sbooleanarray_clear(void* _p) { - ae_bool result; + sbooleanarray *p = (sbooleanarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_clear(&p->val); +} - result = ae_false; - return result; +void _sbooleanarray_destroy(void* _p) +{ + sbooleanarray *p = (sbooleanarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_destroy(&p->val); } 
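The _apbuffers_*, _sboolean_* and _s*array_* wrappers added above all follow the same init / init_copy / clear / destroy lifecycle that ALGLIB applies to every dynamically allocated structure. The sketch below is an illustration only, not part of the patch: it shows how a caller would typically drive that lifecycle around tagsort() (added later in this unit). The header name and the ae_state_init()/ae_state_clear() entry points are assumed from the ALGLIB C core; everything else uses only types and functions visible in this diff.

#include "alglibinternal.h"   /* assumed header name for the apserv/tsort units */

/* Illustrative caller: sorts a plain C array through tagsort() and keeps the
 * permutation table P1 (P1[i] = original index of the i-th smallest value).
 * ae_state_init()/ae_state_clear() are assumed from the ALGLIB C core. */
static void sort_with_permutation(double *data, ae_int_t *order, ae_int_t n)
{
    ae_state st;
    ae_vector a, p1, p2;
    ae_int_t i;

    ae_state_init(&st);
    ae_vector_init(&a,  n, DT_REAL, &st, ae_false);
    ae_vector_init(&p1, 0, DT_INT,  &st, ae_false);
    ae_vector_init(&p2, 0, DT_INT,  &st, ae_false);
    for(i=0; i<n; i++)
        a.ptr.p_double[i] = data[i];

    tagsort(&a, n, &p1, &p2, &st);     /* sorts A, fills P1/P2 */

    for(i=0; i<n; i++)
    {
        data[i]  = a.ptr.p_double[i];
        order[i] = p1.ptr.p_int[i];
    }

    /* explicit teardown, mirroring the *_destroy wrappers above */
    ae_vector_destroy(&a);
    ae_vector_destroy(&p1);
    ae_vector_destroy(&p2);
    ae_state_clear(&st);
}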
-/************************************************************************* -Fast kernel - - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool rmatrixmvf(ae_int_t m, - ae_int_t n, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t opa, - /* Real */ ae_vector* x, - ae_int_t ix, - /* Real */ ae_vector* y, - ae_int_t iy, - ae_state *_state) +void _sinteger_init(void* _p, ae_state *_state, ae_bool make_automatic) { - ae_bool result; + sinteger *p = (sinteger*)_p; + ae_touch_ptr((void*)p); +} - result = ae_false; - return result; +void _sinteger_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + sinteger *dst = (sinteger*)_dst; + sinteger *src = (sinteger*)_src; + dst->val = src->val; } -/************************************************************************* -Fast kernel - - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool cmatrixrighttrsmf(ae_int_t m, - ae_int_t n, - /* Complex */ ae_matrix* a, - ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Complex */ ae_matrix* x, - ae_int_t i2, - ae_int_t j2, - ae_state *_state) +void _sinteger_clear(void* _p) { -#ifndef ALGLIB_INTERCEPTS_ABLAS - ae_bool result; + sinteger *p = (sinteger*)_p; + ae_touch_ptr((void*)p); +} - result = ae_false; - return result; -#else - return _ialglib_i_cmatrixrighttrsmf(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); -#endif +void _sinteger_destroy(void* _p) +{ + sinteger *p = (sinteger*)_p; + ae_touch_ptr((void*)p); } -/************************************************************************* -Fast kernel - - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool cmatrixlefttrsmf(ae_int_t m, - ae_int_t n, - /* Complex */ ae_matrix* a, - ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Complex */ ae_matrix* x, - ae_int_t i2, - ae_int_t j2, - ae_state *_state) +void _sintegerarray_init(void* _p, ae_state *_state, ae_bool make_automatic) { -#ifndef ALGLIB_INTERCEPTS_ABLAS - ae_bool result; + sintegerarray *p = (sintegerarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_init(&p->val, 0, DT_INT, _state, make_automatic); +} - result = ae_false; - return result; -#else - return _ialglib_i_cmatrixlefttrsmf(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); -#endif +void _sintegerarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + sintegerarray *dst = (sintegerarray*)_dst; + sintegerarray *src = (sintegerarray*)_src; + ae_vector_init_copy(&dst->val, &src->val, _state, make_automatic); } -/************************************************************************* -Fast kernel +void _sintegerarray_clear(void* _p) +{ + sintegerarray *p = (sintegerarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_clear(&p->val); +} - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool rmatrixrighttrsmf(ae_int_t m, - ae_int_t n, - /* Real */ ae_matrix* a, - ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Real */ ae_matrix* x, - ae_int_t i2, - ae_int_t j2, - ae_state *_state) + +void _sintegerarray_destroy(void* _p) { -#ifndef ALGLIB_INTERCEPTS_ABLAS - ae_bool result; + sintegerarray *p = 
(sintegerarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_destroy(&p->val); +} - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixrighttrsmf(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); -#endif +void _sreal_init(void* _p, ae_state *_state, ae_bool make_automatic) +{ + sreal *p = (sreal*)_p; + ae_touch_ptr((void*)p); } -/************************************************************************* -Fast kernel +void _sreal_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + sreal *dst = (sreal*)_dst; + sreal *src = (sreal*)_src; + dst->val = src->val; +} - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool rmatrixlefttrsmf(ae_int_t m, - ae_int_t n, - /* Real */ ae_matrix* a, - ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Real */ ae_matrix* x, - ae_int_t i2, - ae_int_t j2, - ae_state *_state) + +void _sreal_clear(void* _p) { -#ifndef ALGLIB_INTERCEPTS_ABLAS - ae_bool result; + sreal *p = (sreal*)_p; + ae_touch_ptr((void*)p); +} - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixlefttrsmf(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); -#endif +void _sreal_destroy(void* _p) +{ + sreal *p = (sreal*)_p; + ae_touch_ptr((void*)p); } -/************************************************************************* -Fast kernel +void _srealarray_init(void* _p, ae_state *_state, ae_bool make_automatic) +{ + srealarray *p = (srealarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_init(&p->val, 0, DT_REAL, _state, make_automatic); +} - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool cmatrixherkf(ae_int_t n, - ae_int_t k, - double alpha, - /* Complex */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - double beta, - /* Complex */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_bool isupper, - ae_state *_state) + +void _srealarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { -#ifndef ALGLIB_INTERCEPTS_ABLAS - ae_bool result; + srealarray *dst = (srealarray*)_dst; + srealarray *src = (srealarray*)_src; + ae_vector_init_copy(&dst->val, &src->val, _state, make_automatic); +} - result = ae_false; - return result; -#else - return _ialglib_i_cmatrixherkf(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); -#endif +void _srealarray_clear(void* _p) +{ + srealarray *p = (srealarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_clear(&p->val); } -/************************************************************************* -Fast kernel +void _srealarray_destroy(void* _p) +{ + srealarray *p = (srealarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_destroy(&p->val); +} - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool rmatrixsyrkf(ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_bool isupper, - ae_state *_state) + +void _scomplex_init(void* _p, ae_state *_state, ae_bool make_automatic) { -#ifndef ALGLIB_INTERCEPTS_ABLAS - ae_bool result; + scomplex *p = (scomplex*)_p; + ae_touch_ptr((void*)p); +} - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixsyrkf(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); -#endif 
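The removed cmatrix*/rmatrix* "fast kernel" stubs above all share one dispatch idiom: when ALGLIB_INTERCEPTS_ABLAS is not defined they simply return ae_false, so the caller can try the optimized kernel and silently fall back to the generic C path (the same pattern reappears in the removed rmatrixgemmk code further below, which returns early when rmatrixgemmf succeeds). The self-contained sketch below illustrates that idiom only; HAVE_FAST_DOT and fast_dot_impl() are placeholder names, not ALGLIB symbols.

#include <stdbool.h>
#include <stddef.h>

/* Portable fallback, always available. */
static double dot_generic(const double *x, const double *y, size_t n)
{
    double s = 0.0;
    for(size_t i = 0; i < n; i++)
        s += x[i]*y[i];
    return s;
}

/* Optional fast path: mirrors the removed stubs, which return ae_false
 * unless an optimized intercept (ABLAS/MKL) is compiled in. */
static bool dot_fast(const double *x, const double *y, size_t n, double *out)
{
#ifndef HAVE_FAST_DOT
    (void)x; (void)y; (void)n; (void)out;
    return false;                   /* no intercept: caller must fall back  */
#else
    *out = fast_dot_impl(x, y, n);  /* provided by the optimized backend    */
    return true;
#endif
}

static double dot(const double *x, const double *y, size_t n)
{
    double r;
    if( dot_fast(x, y, n, &r) )     /* try the optimized kernel first       */
        return r;
    return dot_generic(x, y, n);    /* portable C fallback                  */
}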
+void _scomplex_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + scomplex *dst = (scomplex*)_dst; + scomplex *src = (scomplex*)_src; + dst->val = src->val; } -/************************************************************************* -Fast kernel +void _scomplex_clear(void* _p) +{ + scomplex *p = (scomplex*)_p; + ae_touch_ptr((void*)p); +} - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool rmatrixgemmf(ae_int_t m, - ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_state *_state) + +void _scomplex_destroy(void* _p) { -#ifndef ALGLIB_INTERCEPTS_ABLAS - ae_bool result; + scomplex *p = (scomplex*)_p; + ae_touch_ptr((void*)p); +} - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixgemmf(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); -#endif +void _scomplexarray_init(void* _p, ae_state *_state, ae_bool make_automatic) +{ + scomplexarray *p = (scomplexarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_init(&p->val, 0, DT_COMPLEX, _state, make_automatic); } -/************************************************************************* -Fast kernel +void _scomplexarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + scomplexarray *dst = (scomplexarray*)_dst; + scomplexarray *src = (scomplexarray*)_src; + ae_vector_init_copy(&dst->val, &src->val, _state, make_automatic); +} - -- ALGLIB routine -- - 19.01.2010 - Bochkanov Sergey -*************************************************************************/ -ae_bool cmatrixgemmf(ae_int_t m, - ae_int_t n, - ae_int_t k, - ae_complex alpha, - /* Complex */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - /* Complex */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - ae_complex beta, - /* Complex */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_state *_state) + +void _scomplexarray_clear(void* _p) { -#ifndef ALGLIB_INTERCEPTS_ABLAS - ae_bool result; + scomplexarray *p = (scomplexarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_clear(&p->val); +} - result = ae_false; - return result; -#else - return _ialglib_i_cmatrixgemmf(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); -#endif +void _scomplexarray_destroy(void* _p) +{ + scomplexarray *p = (scomplexarray*)_p; + ae_touch_ptr((void*)p); + ae_vector_destroy(&p->val); } -/************************************************************************* -CMatrixGEMM kernel, basecase code for CMatrixGEMM. +#endif +#if defined(AE_COMPILE_TSORT) || !defined(AE_PARTIAL_BUILD) -This subroutine calculates C = alpha*op1(A)*op2(B) +beta*C where: -* C is MxN general matrix -* op1(A) is MxK matrix -* op2(B) is KxN matrix -* "op" may be identity transformation, transposition, conjugate transposition -Additional info: -* multiplication result replaces C. If Beta=0, C elements are not used in - calculations (not multiplied by zero - just not referenced) -* if Alpha=0, A is not used (not multiplied by zero - just not referenced) -* if both Beta and Alpha are zero, C is filled by zeros. +/************************************************************************* +This function sorts array of real keys by ascending. 
-IMPORTANT: +Its results are: +* sorted array A +* permutation tables P1, P2 -This function does NOT preallocate output matrix C, it MUST be preallocated -by caller prior to calling this function. In case C does not have enough -space to store result, exception will be generated. +Algorithm outputs permutation tables using two formats: +* as usual permutation of [0..N-1]. If P1[i]=j, then sorted A[i] contains + value which was moved there from J-th position. +* as a sequence of pairwise permutations. Sorted A[] may be obtained by + swaping A[i] and A[P2[i]] for all i from 0 to N-1. + +INPUT PARAMETERS: + A - unsorted array + N - array size -INPUT PARAMETERS - M - matrix size, M>0 - N - matrix size, N>0 - K - matrix size, K>0 - Alpha - coefficient - A - matrix - IA - submatrix offset - JA - submatrix offset - OpTypeA - transformation type: - * 0 - no transformation - * 1 - transposition - * 2 - conjugate transposition - B - matrix - IB - submatrix offset - JB - submatrix offset - OpTypeB - transformation type: - * 0 - no transformation - * 1 - transposition - * 2 - conjugate transposition - Beta - coefficient - C - PREALLOCATED output matrix - IC - submatrix offset - JC - submatrix offset +OUPUT PARAMETERS: + A - sorted array + P1, P2 - permutation tables, array[N] + +NOTES: + this function assumes that A[] is finite; it doesn't checks that + condition. All other conditions (size of input arrays, etc.) are not + checked too. - -- ALGLIB routine -- - 27.03.2013 - Bochkanov Sergey + -- ALGLIB -- + Copyright 14.05.2008 by Bochkanov Sergey *************************************************************************/ -void cmatrixgemmk(ae_int_t m, +void tagsort(/* Real */ ae_vector* a, ae_int_t n, - ae_int_t k, - ae_complex alpha, - /* Complex */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - /* Complex */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - ae_complex beta, - /* Complex */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, + /* Integer */ ae_vector* p1, + /* Integer */ ae_vector* p2, ae_state *_state) { - ae_int_t i; - ae_int_t j; - ae_complex v; - ae_complex v00; - ae_complex v01; - ae_complex v10; - ae_complex v11; - double v00x; - double v00y; - double v01x; - double v01y; - double v10x; - double v10y; - double v11x; - double v11y; - double a0x; - double a0y; - double a1x; - double a1y; - double b0x; - double b0y; - double b1x; - double b1y; - ae_int_t idxa0; - ae_int_t idxa1; - ae_int_t idxb0; - ae_int_t idxb1; - ae_int_t i0; - ae_int_t i1; - ae_int_t ik; - ae_int_t j0; - ae_int_t j1; - ae_int_t jk; - ae_int_t t; - ae_int_t offsa; - ae_int_t offsb; + ae_frame _frame_block; + apbuffers buf; + + ae_frame_make(_state, &_frame_block); + memset(&buf, 0, sizeof(buf)); + ae_vector_clear(p1); + ae_vector_clear(p2); + _apbuffers_init(&buf, _state, ae_true); + + tagsortbuf(a, n, p1, p2, &buf, _state); + ae_frame_leave(_state); +} + + +/************************************************************************* +Buffered variant of TagSort, which accepts preallocated output arrays as +well as special structure for buffered allocations. If arrays are too +short, they are reallocated. If they are large enough, no memory +allocation is done. 
+ +It is intended to be used in the performance-critical parts of code, where +additional allocations can lead to severe performance degradation + + -- ALGLIB -- + Copyright 14.05.2008 by Bochkanov Sergey +*************************************************************************/ +void tagsortbuf(/* Real */ ae_vector* a, + ae_int_t n, + /* Integer */ ae_vector* p1, + /* Integer */ ae_vector* p2, + apbuffers* buf, + ae_state *_state) +{ + ae_int_t i; + ae_int_t lv; + ae_int_t lp; + ae_int_t rv; + ae_int_t rp; /* - * if matrix size is zero + * Special cases */ - if( m==0||n==0 ) + if( n<=0 ) { return; } + if( n==1 ) + { + ivectorsetlengthatleast(p1, 1, _state); + ivectorsetlengthatleast(p2, 1, _state); + p1->ptr.p_int[0] = 0; + p2->ptr.p_int[0] = 0; + return; + } /* - * Try optimized code + * General case, N>1: prepare permutations table P1 */ - if( cmatrixgemmf(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc, _state) ) + ivectorsetlengthatleast(p1, n, _state); + for(i=0; i<=n-1; i++) + { + p1->ptr.p_int[i] = i; + } + + /* + * General case, N>1: sort, update P1 + */ + rvectorsetlengthatleast(&buf->ra0, n, _state); + ivectorsetlengthatleast(&buf->ia0, n, _state); + tagsortfasti(a, p1, &buf->ra0, &buf->ia0, n, _state); + + /* + * General case, N>1: fill permutations table P2 + * + * To fill P2 we maintain two arrays: + * * PV (Buf.IA0), Position(Value). PV[i] contains position of I-th key at the moment + * * VP (Buf.IA1), Value(Position). VP[i] contains key which has position I at the moment + * + * At each step we making permutation of two items: + * Left, which is given by position/value pair LP/LV + * and Right, which is given by RP/RV + * and updating PV[] and VP[] correspondingly. + */ + ivectorsetlengthatleast(&buf->ia0, n, _state); + ivectorsetlengthatleast(&buf->ia1, n, _state); + ivectorsetlengthatleast(p2, n, _state); + for(i=0; i<=n-1; i++) + { + buf->ia0.ptr.p_int[i] = i; + buf->ia1.ptr.p_int[i] = i; + } + for(i=0; i<=n-1; i++) + { + + /* + * calculate LP, LV, RP, RV + */ + lp = i; + lv = buf->ia1.ptr.p_int[lp]; + rv = p1->ptr.p_int[i]; + rp = buf->ia0.ptr.p_int[rv]; + + /* + * Fill P2 + */ + p2->ptr.p_int[i] = rp; + + /* + * update PV and VP + */ + buf->ia1.ptr.p_int[lp] = rv; + buf->ia1.ptr.p_int[rp] = lv; + buf->ia0.ptr.p_int[lv] = rp; + buf->ia0.ptr.p_int[rv] = lp; + } +} + + +/************************************************************************* +Same as TagSort, but optimized for real keys and integer labels. + +A is sorted, and same permutations are applied to B. + +NOTES: +1. this function assumes that A[] is finite; it doesn't checks that + condition. All other conditions (size of input arrays, etc.) are not + checked too. +2. this function uses two buffers, BufA and BufB, each is N elements large. + They may be preallocated (which will save some time) or not, in which + case function will automatically allocate memory. 
+ + -- ALGLIB -- + Copyright 11.12.2008 by Bochkanov Sergey +*************************************************************************/ +void tagsortfasti(/* Real */ ae_vector* a, + /* Integer */ ae_vector* b, + /* Real */ ae_vector* bufa, + /* Integer */ ae_vector* bufb, + ae_int_t n, + ae_state *_state) +{ + ae_int_t i; + ae_int_t j; + ae_bool isascending; + ae_bool isdescending; + double tmpr; + ae_int_t tmpi; + + + + /* + * Special case + */ + if( n<=1 ) { return; } /* - * if K=0, then C=Beta*C + * Test for already sorted set */ - if( k==0 ) + isascending = ae_true; + isdescending = ae_true; + for(i=1; i<=n-1; i++) { - if( ae_c_neq_d(beta,(double)(1)) ) + isascending = isascending&&a->ptr.p_double[i]>=a->ptr.p_double[i-1]; + isdescending = isdescending&&a->ptr.p_double[i]<=a->ptr.p_double[i-1]; + } + if( isascending ) + { + return; + } + if( isdescending ) + { + for(i=0; i<=n-1; i++) { - if( ae_c_neq_d(beta,(double)(0)) ) - { - for(i=0; i<=m-1; i++) - { - for(j=0; j<=n-1; j++) - { - c->ptr.pp_complex[ic+i][jc+j] = ae_c_mul(beta,c->ptr.pp_complex[ic+i][jc+j]); - } - } - } - else + j = n-1-i; + if( j<=i ) { - for(i=0; i<=m-1; i++) - { - for(j=0; j<=n-1; j++) - { - c->ptr.pp_complex[ic+i][jc+j] = ae_complex_from_i(0); - } - } + break; } + tmpr = a->ptr.p_double[i]; + a->ptr.p_double[i] = a->ptr.p_double[j]; + a->ptr.p_double[j] = tmpr; + tmpi = b->ptr.p_int[i]; + b->ptr.p_int[i] = b->ptr.p_int[j]; + b->ptr.p_int[j] = tmpi; } return; } /* - * This phase is not really necessary, but compiler complains - * about "possibly uninitialized variables" - */ - a0x = (double)(0); - a0y = (double)(0); - a1x = (double)(0); - a1y = (double)(0); - b0x = (double)(0); - b0y = (double)(0); - b1x = (double)(0); - b1y = (double)(0); - - /* * General case */ - i = 0; - while(icntptr.pp_complex[idxa0][offsa].x; - a0y = a->ptr.pp_complex[idxa0][offsa].y; - a1x = a->ptr.pp_complex[idxa1][offsa].x; - a1y = a->ptr.pp_complex[idxa1][offsa].y; - } - if( optypea==1 ) - { - a0x = a->ptr.pp_complex[offsa][idxa0].x; - a0y = a->ptr.pp_complex[offsa][idxa0].y; - a1x = a->ptr.pp_complex[offsa][idxa1].x; - a1y = a->ptr.pp_complex[offsa][idxa1].y; - } - if( optypea==2 ) - { - a0x = a->ptr.pp_complex[offsa][idxa0].x; - a0y = -a->ptr.pp_complex[offsa][idxa0].y; - a1x = a->ptr.pp_complex[offsa][idxa1].x; - a1y = -a->ptr.pp_complex[offsa][idxa1].y; - } - if( optypeb==0 ) - { - b0x = b->ptr.pp_complex[offsb][idxb0].x; - b0y = b->ptr.pp_complex[offsb][idxb0].y; - b1x = b->ptr.pp_complex[offsb][idxb1].x; - b1y = b->ptr.pp_complex[offsb][idxb1].y; - } - if( optypeb==1 ) - { - b0x = b->ptr.pp_complex[idxb0][offsb].x; - b0y = b->ptr.pp_complex[idxb0][offsb].y; - b1x = b->ptr.pp_complex[idxb1][offsb].x; - b1y = b->ptr.pp_complex[idxb1][offsb].y; - } - if( optypeb==2 ) - { - b0x = b->ptr.pp_complex[idxb0][offsb].x; - b0y = -b->ptr.pp_complex[idxb0][offsb].y; - b1x = b->ptr.pp_complex[idxb1][offsb].x; - b1y = -b->ptr.pp_complex[idxb1][offsb].y; - } - v00x = v00x+a0x*b0x-a0y*b0y; - v00y = v00y+a0x*b0y+a0y*b0x; - v01x = v01x+a0x*b1x-a0y*b1y; - v01y = v01y+a0x*b1y+a0y*b1x; - v10x = v10x+a1x*b0x-a1y*b0y; - v10y = v10y+a1x*b0y+a1y*b0x; - v11x = v11x+a1x*b1x-a1y*b1y; - v11y = v11y+a1x*b1y+a1y*b1x; - offsa = offsa+1; - offsb = offsb+1; - } - v00.x = v00x; - v00.y = v00y; - v10.x = v10x; - v10.y = v10y; - v01.x = v01x; - v01.y = v01y; - v11.x = v11x; - v11.y = v11y; - if( ae_c_eq_d(beta,(double)(0)) ) - { - c->ptr.pp_complex[ic+i+0][jc+j+0] = ae_c_mul(alpha,v00); - c->ptr.pp_complex[ic+i+0][jc+j+1] = ae_c_mul(alpha,v01); - 
c->ptr.pp_complex[ic+i+1][jc+j+0] = ae_c_mul(alpha,v10); - c->ptr.pp_complex[ic+i+1][jc+j+1] = ae_c_mul(alpha,v11); - } - else - { - c->ptr.pp_complex[ic+i+0][jc+j+0] = ae_c_add(ae_c_mul(beta,c->ptr.pp_complex[ic+i+0][jc+j+0]),ae_c_mul(alpha,v00)); - c->ptr.pp_complex[ic+i+0][jc+j+1] = ae_c_add(ae_c_mul(beta,c->ptr.pp_complex[ic+i+0][jc+j+1]),ae_c_mul(alpha,v01)); - c->ptr.pp_complex[ic+i+1][jc+j+0] = ae_c_add(ae_c_mul(beta,c->ptr.pp_complex[ic+i+1][jc+j+0]),ae_c_mul(alpha,v10)); - c->ptr.pp_complex[ic+i+1][jc+j+1] = ae_c_add(ae_c_mul(beta,c->ptr.pp_complex[ic+i+1][jc+j+1]),ae_c_mul(alpha,v11)); - } - } - else - { - - /* - * Determine submatrix [I0..I1]x[J0..J1] to process - */ - i0 = i; - i1 = ae_minint(i+1, m-1, _state); - j0 = j; - j1 = ae_minint(j+1, n-1, _state); - - /* - * Process submatrix - */ - for(ik=i0; ik<=i1; ik++) - { - for(jk=j0; jk<=j1; jk++) - { - if( k==0||ae_c_eq_d(alpha,(double)(0)) ) - { - v = ae_complex_from_i(0); - } - else - { - v = ae_complex_from_d(0.0); - if( optypea==0&&optypeb==0 ) - { - v = ae_v_cdotproduct(&a->ptr.pp_complex[ia+ik][ja], 1, "N", &b->ptr.pp_complex[ib][jb+jk], b->stride, "N", ae_v_len(ja,ja+k-1)); - } - if( optypea==0&&optypeb==1 ) - { - v = ae_v_cdotproduct(&a->ptr.pp_complex[ia+ik][ja], 1, "N", &b->ptr.pp_complex[ib+jk][jb], 1, "N", ae_v_len(ja,ja+k-1)); - } - if( optypea==0&&optypeb==2 ) - { - v = ae_v_cdotproduct(&a->ptr.pp_complex[ia+ik][ja], 1, "N", &b->ptr.pp_complex[ib+jk][jb], 1, "Conj", ae_v_len(ja,ja+k-1)); - } - if( optypea==1&&optypeb==0 ) - { - v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "N", &b->ptr.pp_complex[ib][jb+jk], b->stride, "N", ae_v_len(ia,ia+k-1)); - } - if( optypea==1&&optypeb==1 ) - { - v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "N", &b->ptr.pp_complex[ib+jk][jb], 1, "N", ae_v_len(ia,ia+k-1)); - } - if( optypea==1&&optypeb==2 ) - { - v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "N", &b->ptr.pp_complex[ib+jk][jb], 1, "Conj", ae_v_len(ia,ia+k-1)); - } - if( optypea==2&&optypeb==0 ) - { - v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "Conj", &b->ptr.pp_complex[ib][jb+jk], b->stride, "N", ae_v_len(ia,ia+k-1)); - } - if( optypea==2&&optypeb==1 ) - { - v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "Conj", &b->ptr.pp_complex[ib+jk][jb], 1, "N", ae_v_len(ia,ia+k-1)); - } - if( optypea==2&&optypeb==2 ) - { - v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "Conj", &b->ptr.pp_complex[ib+jk][jb], 1, "Conj", ae_v_len(ia,ia+k-1)); - } - } - if( ae_c_eq_d(beta,(double)(0)) ) - { - c->ptr.pp_complex[ic+ik][jc+jk] = ae_c_mul(alpha,v); - } - else - { - c->ptr.pp_complex[ic+ik][jc+jk] = ae_c_add(ae_c_mul(beta,c->ptr.pp_complex[ic+ik][jc+jk]),ae_c_mul(alpha,v)); - } - } - } - } - j = j+2; - } - i = i+2; + ae_vector_set_length(bufa, n, _state); + } + if( bufb->cnt0 - N - matrix size, N>0 - K - matrix size, K>0 - Alpha - coefficient - A - matrix - IA - submatrix offset - JA - submatrix offset - OpTypeA - transformation type: - * 0 - no transformation - * 1 - transposition - B - matrix - IB - submatrix offset - JB - submatrix offset - OpTypeB - transformation type: - * 0 - no transformation - * 1 - transposition - Beta - coefficient - C - PREALLOCATED output matrix - IC - submatrix offset - JC - submatrix offset +NOTES: +1. this function assumes that A[] is finite; it doesn't checks that + condition. All other conditions (size of input arrays, etc.) are not + checked too. +2. 
this function uses two buffers, BufA and BufB, each is N elements large. + They may be preallocated (which will save some time) or not, in which + case function will automatically allocate memory. - -- ALGLIB routine -- - 27.03.2013 - Bochkanov Sergey + -- ALGLIB -- + Copyright 11.12.2008 by Bochkanov Sergey *************************************************************************/ -void rmatrixgemmk(ae_int_t m, +void tagsortfastr(/* Real */ ae_vector* a, + /* Real */ ae_vector* b, + /* Real */ ae_vector* bufa, + /* Real */ ae_vector* bufb, ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, ae_state *_state) { ae_int_t i; ae_int_t j; + ae_bool isascending; + ae_bool isdescending; + double tmpr; /* - * if matrix size is zero + * Special case */ - if( m==0||n==0 ) + if( n<=1 ) { return; } /* - * Try optimized code + * Test for already sorted set */ - if( rmatrixgemmf(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc, _state) ) + isascending = ae_true; + isdescending = ae_true; + for(i=1; i<=n-1; i++) + { + isascending = isascending&&a->ptr.p_double[i]>=a->ptr.p_double[i-1]; + isdescending = isdescending&&a->ptr.p_double[i]<=a->ptr.p_double[i-1]; + } + if( isascending ) { return; } - - /* - * if K=0, then C=Beta*C - */ - if( k==0||ae_fp_eq(alpha,(double)(0)) ) + if( isdescending ) { - if( ae_fp_neq(beta,(double)(1)) ) + for(i=0; i<=n-1; i++) { - if( ae_fp_neq(beta,(double)(0)) ) - { - for(i=0; i<=m-1; i++) - { - for(j=0; j<=n-1; j++) - { - c->ptr.pp_double[ic+i][jc+j] = beta*c->ptr.pp_double[ic+i][jc+j]; - } - } - } - else + j = n-1-i; + if( j<=i ) { - for(i=0; i<=m-1; i++) - { - for(j=0; j<=n-1; j++) - { - c->ptr.pp_double[ic+i][jc+j] = (double)(0); - } - } + break; } + tmpr = a->ptr.p_double[i]; + a->ptr.p_double[i] = a->ptr.p_double[j]; + a->ptr.p_double[j] = tmpr; + tmpr = b->ptr.p_double[i]; + b->ptr.p_double[i] = b->ptr.p_double[j]; + b->ptr.p_double[j] = tmpr; } return; } /* - * Call specialized code. - * - * NOTE: specialized code was moved to separate function because of strange - * issues with instructions cache on some systems; Having too long - * functions significantly slows down internal loop of the algorithm. + * General case */ - if( optypea==0&&optypeb==0 ) + if( bufa->cntcntptr.p_double[i]>=a->ptr.p_double[i-1]; + isdescending = isdescending&&a->ptr.p_double[i]<=a->ptr.p_double[i-1]; + } + if( isascending ) + { + return; + } + if( isdescending ) + { + for(i=0; i<=n-1; i++) + { + j = n-1-i; + if( j<=i ) + { + break; + } + tmpr = a->ptr.p_double[i]; + a->ptr.p_double[i] = a->ptr.p_double[j]; + a->ptr.p_double[j] = tmpr; + } + return; + } + + /* + * General case + */ + if( bufa->cnt0 (assertion is thrown otherwise) +A is sorted, and same permutations are applied to B. -INPUT PARAMETERS - M - matrix size, M>0 - N - matrix size, N>0 - K - matrix size, K>0 - Alpha - coefficient - A - matrix - IA - submatrix offset - JA - submatrix offset - B - matrix - IB - submatrix offset - JB - submatrix offset - Beta - coefficient - C - PREALLOCATED output matrix - IC - submatrix offset - JC - submatrix offset +NOTES: + this function assumes that A[] is finite; it doesn't checks that + condition. All other conditions (size of input arrays, etc.) are not + checked too. 
- -- ALGLIB routine -- - 27.03.2013 - Bochkanov Sergey + -- ALGLIB -- + Copyright 11.12.2008 by Bochkanov Sergey *************************************************************************/ -void rmatrixgemmk44v00(ae_int_t m, +void tagsortmiddleir(/* Integer */ ae_vector* a, + /* Real */ ae_vector* b, + ae_int_t offset, ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, ae_state *_state) { ae_int_t i; - ae_int_t j; - double v; - double v00; - double v01; - double v02; - double v03; - double v10; - double v11; - double v12; - double v13; - double v20; - double v21; - double v22; - double v23; - double v30; - double v31; - double v32; - double v33; - double a0; - double a1; - double a2; - double a3; - double b0; - double b1; - double b2; - double b3; - ae_int_t idxa0; - ae_int_t idxa1; - ae_int_t idxa2; - ae_int_t idxa3; - ae_int_t idxb0; - ae_int_t idxb1; - ae_int_t idxb2; - ae_int_t idxb3; - ae_int_t i0; - ae_int_t i1; - ae_int_t ik; - ae_int_t j0; - ae_int_t j1; - ae_int_t jk; + ae_int_t k; ae_int_t t; - ae_int_t offsa; - ae_int_t offsb; + ae_int_t tmp; + double tmpr; + ae_int_t p0; + ae_int_t p1; + ae_int_t at; + ae_int_t ak; + ae_int_t ak1; + double bt; - ae_assert(ae_fp_neq(alpha,(double)(0)), "RMatrixGEMMK44V00: internal error (Alpha=0)", _state); /* - * if matrix size is zero + * Special cases */ - if( m==0||n==0 ) + if( n<=1 ) { return; } /* - * A*B + * General case, N>1: sort, update B */ - i = 0; - while(iptr.p_int[p0]; + at = a->ptr.p_int[p1]; + if( ak>=at ) { - - /* - * Specialized 4x4 code for [I..I+3]x[J..J+3] submatrix of C. - * - * This submatrix is calculated as sum of K rank-1 products, - * with operands cached in local variables in order to speed - * up operations with arrays. 
- */ - idxa0 = ia+i+0; - idxa1 = ia+i+1; - idxa2 = ia+i+2; - idxa3 = ia+i+3; - offsa = ja; - idxb0 = jb+j+0; - idxb1 = jb+j+1; - idxb2 = jb+j+2; - idxb3 = jb+j+3; - offsb = ib; - v00 = 0.0; - v01 = 0.0; - v02 = 0.0; - v03 = 0.0; - v10 = 0.0; - v11 = 0.0; - v12 = 0.0; - v13 = 0.0; - v20 = 0.0; - v21 = 0.0; - v22 = 0.0; - v23 = 0.0; - v30 = 0.0; - v31 = 0.0; - v32 = 0.0; - v33 = 0.0; - - /* - * Different variants of internal loop - */ - for(t=0; t<=k-1; t++) - { - a0 = a->ptr.pp_double[idxa0][offsa]; - a1 = a->ptr.pp_double[idxa1][offsa]; - b0 = b->ptr.pp_double[offsb][idxb0]; - b1 = b->ptr.pp_double[offsb][idxb1]; - v00 = v00+a0*b0; - v01 = v01+a0*b1; - v10 = v10+a1*b0; - v11 = v11+a1*b1; - a2 = a->ptr.pp_double[idxa2][offsa]; - a3 = a->ptr.pp_double[idxa3][offsa]; - v20 = v20+a2*b0; - v21 = v21+a2*b1; - v30 = v30+a3*b0; - v31 = v31+a3*b1; - b2 = b->ptr.pp_double[offsb][idxb2]; - b3 = b->ptr.pp_double[offsb][idxb3]; - v22 = v22+a2*b2; - v23 = v23+a2*b3; - v32 = v32+a3*b2; - v33 = v33+a3*b3; - v02 = v02+a0*b2; - v03 = v03+a0*b3; - v12 = v12+a1*b2; - v13 = v13+a1*b3; - offsa = offsa+1; - offsb = offsb+1; - } - if( ae_fp_eq(beta,(double)(0)) ) - { - c->ptr.pp_double[ic+i+0][jc+j+0] = alpha*v00; - c->ptr.pp_double[ic+i+0][jc+j+1] = alpha*v01; - c->ptr.pp_double[ic+i+0][jc+j+2] = alpha*v02; - c->ptr.pp_double[ic+i+0][jc+j+3] = alpha*v03; - c->ptr.pp_double[ic+i+1][jc+j+0] = alpha*v10; - c->ptr.pp_double[ic+i+1][jc+j+1] = alpha*v11; - c->ptr.pp_double[ic+i+1][jc+j+2] = alpha*v12; - c->ptr.pp_double[ic+i+1][jc+j+3] = alpha*v13; - c->ptr.pp_double[ic+i+2][jc+j+0] = alpha*v20; - c->ptr.pp_double[ic+i+2][jc+j+1] = alpha*v21; - c->ptr.pp_double[ic+i+2][jc+j+2] = alpha*v22; - c->ptr.pp_double[ic+i+2][jc+j+3] = alpha*v23; - c->ptr.pp_double[ic+i+3][jc+j+0] = alpha*v30; - c->ptr.pp_double[ic+i+3][jc+j+1] = alpha*v31; - c->ptr.pp_double[ic+i+3][jc+j+2] = alpha*v32; - c->ptr.pp_double[ic+i+3][jc+j+3] = alpha*v33; - } - else - { - c->ptr.pp_double[ic+i+0][jc+j+0] = beta*c->ptr.pp_double[ic+i+0][jc+j+0]+alpha*v00; - c->ptr.pp_double[ic+i+0][jc+j+1] = beta*c->ptr.pp_double[ic+i+0][jc+j+1]+alpha*v01; - c->ptr.pp_double[ic+i+0][jc+j+2] = beta*c->ptr.pp_double[ic+i+0][jc+j+2]+alpha*v02; - c->ptr.pp_double[ic+i+0][jc+j+3] = beta*c->ptr.pp_double[ic+i+0][jc+j+3]+alpha*v03; - c->ptr.pp_double[ic+i+1][jc+j+0] = beta*c->ptr.pp_double[ic+i+1][jc+j+0]+alpha*v10; - c->ptr.pp_double[ic+i+1][jc+j+1] = beta*c->ptr.pp_double[ic+i+1][jc+j+1]+alpha*v11; - c->ptr.pp_double[ic+i+1][jc+j+2] = beta*c->ptr.pp_double[ic+i+1][jc+j+2]+alpha*v12; - c->ptr.pp_double[ic+i+1][jc+j+3] = beta*c->ptr.pp_double[ic+i+1][jc+j+3]+alpha*v13; - c->ptr.pp_double[ic+i+2][jc+j+0] = beta*c->ptr.pp_double[ic+i+2][jc+j+0]+alpha*v20; - c->ptr.pp_double[ic+i+2][jc+j+1] = beta*c->ptr.pp_double[ic+i+2][jc+j+1]+alpha*v21; - c->ptr.pp_double[ic+i+2][jc+j+2] = beta*c->ptr.pp_double[ic+i+2][jc+j+2]+alpha*v22; - c->ptr.pp_double[ic+i+2][jc+j+3] = beta*c->ptr.pp_double[ic+i+2][jc+j+3]+alpha*v23; - c->ptr.pp_double[ic+i+3][jc+j+0] = beta*c->ptr.pp_double[ic+i+3][jc+j+0]+alpha*v30; - c->ptr.pp_double[ic+i+3][jc+j+1] = beta*c->ptr.pp_double[ic+i+3][jc+j+1]+alpha*v31; - c->ptr.pp_double[ic+i+3][jc+j+2] = beta*c->ptr.pp_double[ic+i+3][jc+j+2]+alpha*v32; - c->ptr.pp_double[ic+i+3][jc+j+3] = beta*c->ptr.pp_double[ic+i+3][jc+j+3]+alpha*v33; - } + break; } - else + a->ptr.p_int[p0] = at; + a->ptr.p_int[p1] = ak; + tmpr = b->ptr.p_double[p0]; + b->ptr.p_double[p0] = b->ptr.p_double[p1]; + b->ptr.p_double[p1] = tmpr; + t = k; + } + } + for(i=n-1; i>=1; i--) + { + p0 = 
offset+0; + p1 = offset+i; + tmp = a->ptr.p_int[p1]; + a->ptr.p_int[p1] = a->ptr.p_int[p0]; + a->ptr.p_int[p0] = tmp; + at = tmp; + tmpr = b->ptr.p_double[p1]; + b->ptr.p_double[p1] = b->ptr.p_double[p0]; + b->ptr.p_double[p0] = tmpr; + bt = tmpr; + t = 0; + for(;;) + { + k = 2*t+1; + if( k+1>i ) { - - /* - * Determine submatrix [I0..I1]x[J0..J1] to process - */ - i0 = i; - i1 = ae_minint(i+3, m-1, _state); - j0 = j; - j1 = ae_minint(j+3, n-1, _state); - - /* - * Process submatrix - */ - for(ik=i0; ik<=i1; ik++) + break; + } + p0 = offset+t; + p1 = offset+k; + ak = a->ptr.p_int[p1]; + if( k+1ptr.p_int[p1+1]; + if( ak1>ak ) { - for(jk=j0; jk<=j1; jk++) - { - if( k==0||ae_fp_eq(alpha,(double)(0)) ) - { - v = (double)(0); - } - else - { - v = ae_v_dotproduct(&a->ptr.pp_double[ia+ik][ja], 1, &b->ptr.pp_double[ib][jb+jk], b->stride, ae_v_len(ja,ja+k-1)); - } - if( ae_fp_eq(beta,(double)(0)) ) - { - c->ptr.pp_double[ic+ik][jc+jk] = alpha*v; - } - else - { - c->ptr.pp_double[ic+ik][jc+jk] = beta*c->ptr.pp_double[ic+ik][jc+jk]+alpha*v; - } - } + ak = ak1; + p1 = p1+1; + k = k+1; } } - j = j+4; + if( at>=ak ) + { + break; + } + a->ptr.p_int[p1] = at; + a->ptr.p_int[p0] = ak; + b->ptr.p_double[p0] = b->ptr.p_double[p1]; + b->ptr.p_double[p1] = bt; + t = k; } - i = i+4; } } /************************************************************************* -RMatrixGEMM kernel, basecase code for RMatrixGEMM, specialized for sitation -with OpTypeA=0 and OpTypeB=1. - -Additional info: -* this function requires that Alpha<>0 (assertion is thrown otherwise) - -INPUT PARAMETERS - M - matrix size, M>0 - N - matrix size, N>0 - K - matrix size, K>0 - Alpha - coefficient - A - matrix - IA - submatrix offset - JA - submatrix offset - B - matrix - IB - submatrix offset - JB - submatrix offset - Beta - coefficient - C - PREALLOCATED output matrix - IC - submatrix offset - JC - submatrix offset +Sorting function optimized for integer values (only keys, no labels), can +be used to sort middle of the array - -- ALGLIB routine -- - 27.03.2013 - Bochkanov Sergey + -- ALGLIB -- + Copyright 11.12.2008 by Bochkanov Sergey *************************************************************************/ -void rmatrixgemmk44v01(ae_int_t m, +void sortmiddlei(/* Integer */ ae_vector* a, + ae_int_t offset, ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, ae_state *_state) { ae_int_t i; - ae_int_t j; - double v; - double v00; - double v01; - double v02; - double v03; - double v10; - double v11; - double v12; - double v13; - double v20; - double v21; - double v22; - double v23; - double v30; - double v31; - double v32; - double v33; - double a0; - double a1; - double a2; - double a3; - double b0; - double b1; - double b2; - double b3; - ae_int_t idxa0; - ae_int_t idxa1; - ae_int_t idxa2; - ae_int_t idxa3; - ae_int_t idxb0; - ae_int_t idxb1; - ae_int_t idxb2; - ae_int_t idxb3; - ae_int_t i0; - ae_int_t i1; - ae_int_t ik; - ae_int_t j0; - ae_int_t j1; - ae_int_t jk; + ae_int_t k; ae_int_t t; - ae_int_t offsa; - ae_int_t offsb; + ae_int_t tmp; + ae_int_t p0; + ae_int_t p1; + ae_int_t at; + ae_int_t ak; + ae_int_t ak1; - ae_assert(ae_fp_neq(alpha,(double)(0)), "RMatrixGEMMK44V00: internal error (Alpha=0)", _state); /* - * if matrix size is zero + * Special cases */ - if( m==0||n==0 ) + if( n<=1 ) { return; } /* - * A*B' + * General case, N>1: sort, update B */ - i = 0; - 
while(iptr.p_int[p0]; + at = a->ptr.p_int[p1]; + if( ak>=at ) { - - /* - * Specialized 4x4 code for [I..I+3]x[J..J+3] submatrix of C. - * - * This submatrix is calculated as sum of K rank-1 products, - * with operands cached in local variables in order to speed - * up operations with arrays. - */ - idxa0 = ia+i+0; - idxa1 = ia+i+1; - idxa2 = ia+i+2; - idxa3 = ia+i+3; - offsa = ja; - idxb0 = ib+j+0; - idxb1 = ib+j+1; - idxb2 = ib+j+2; - idxb3 = ib+j+3; - offsb = jb; - v00 = 0.0; - v01 = 0.0; - v02 = 0.0; - v03 = 0.0; - v10 = 0.0; - v11 = 0.0; - v12 = 0.0; - v13 = 0.0; - v20 = 0.0; - v21 = 0.0; - v22 = 0.0; - v23 = 0.0; - v30 = 0.0; - v31 = 0.0; - v32 = 0.0; - v33 = 0.0; - for(t=0; t<=k-1; t++) - { - a0 = a->ptr.pp_double[idxa0][offsa]; - a1 = a->ptr.pp_double[idxa1][offsa]; - b0 = b->ptr.pp_double[idxb0][offsb]; - b1 = b->ptr.pp_double[idxb1][offsb]; - v00 = v00+a0*b0; - v01 = v01+a0*b1; - v10 = v10+a1*b0; - v11 = v11+a1*b1; - a2 = a->ptr.pp_double[idxa2][offsa]; - a3 = a->ptr.pp_double[idxa3][offsa]; - v20 = v20+a2*b0; - v21 = v21+a2*b1; - v30 = v30+a3*b0; - v31 = v31+a3*b1; - b2 = b->ptr.pp_double[idxb2][offsb]; - b3 = b->ptr.pp_double[idxb3][offsb]; - v22 = v22+a2*b2; - v23 = v23+a2*b3; - v32 = v32+a3*b2; - v33 = v33+a3*b3; - v02 = v02+a0*b2; - v03 = v03+a0*b3; - v12 = v12+a1*b2; - v13 = v13+a1*b3; - offsa = offsa+1; - offsb = offsb+1; - } - if( ae_fp_eq(beta,(double)(0)) ) - { - c->ptr.pp_double[ic+i+0][jc+j+0] = alpha*v00; - c->ptr.pp_double[ic+i+0][jc+j+1] = alpha*v01; - c->ptr.pp_double[ic+i+0][jc+j+2] = alpha*v02; - c->ptr.pp_double[ic+i+0][jc+j+3] = alpha*v03; - c->ptr.pp_double[ic+i+1][jc+j+0] = alpha*v10; - c->ptr.pp_double[ic+i+1][jc+j+1] = alpha*v11; - c->ptr.pp_double[ic+i+1][jc+j+2] = alpha*v12; - c->ptr.pp_double[ic+i+1][jc+j+3] = alpha*v13; - c->ptr.pp_double[ic+i+2][jc+j+0] = alpha*v20; - c->ptr.pp_double[ic+i+2][jc+j+1] = alpha*v21; - c->ptr.pp_double[ic+i+2][jc+j+2] = alpha*v22; - c->ptr.pp_double[ic+i+2][jc+j+3] = alpha*v23; - c->ptr.pp_double[ic+i+3][jc+j+0] = alpha*v30; - c->ptr.pp_double[ic+i+3][jc+j+1] = alpha*v31; - c->ptr.pp_double[ic+i+3][jc+j+2] = alpha*v32; - c->ptr.pp_double[ic+i+3][jc+j+3] = alpha*v33; - } - else - { - c->ptr.pp_double[ic+i+0][jc+j+0] = beta*c->ptr.pp_double[ic+i+0][jc+j+0]+alpha*v00; - c->ptr.pp_double[ic+i+0][jc+j+1] = beta*c->ptr.pp_double[ic+i+0][jc+j+1]+alpha*v01; - c->ptr.pp_double[ic+i+0][jc+j+2] = beta*c->ptr.pp_double[ic+i+0][jc+j+2]+alpha*v02; - c->ptr.pp_double[ic+i+0][jc+j+3] = beta*c->ptr.pp_double[ic+i+0][jc+j+3]+alpha*v03; - c->ptr.pp_double[ic+i+1][jc+j+0] = beta*c->ptr.pp_double[ic+i+1][jc+j+0]+alpha*v10; - c->ptr.pp_double[ic+i+1][jc+j+1] = beta*c->ptr.pp_double[ic+i+1][jc+j+1]+alpha*v11; - c->ptr.pp_double[ic+i+1][jc+j+2] = beta*c->ptr.pp_double[ic+i+1][jc+j+2]+alpha*v12; - c->ptr.pp_double[ic+i+1][jc+j+3] = beta*c->ptr.pp_double[ic+i+1][jc+j+3]+alpha*v13; - c->ptr.pp_double[ic+i+2][jc+j+0] = beta*c->ptr.pp_double[ic+i+2][jc+j+0]+alpha*v20; - c->ptr.pp_double[ic+i+2][jc+j+1] = beta*c->ptr.pp_double[ic+i+2][jc+j+1]+alpha*v21; - c->ptr.pp_double[ic+i+2][jc+j+2] = beta*c->ptr.pp_double[ic+i+2][jc+j+2]+alpha*v22; - c->ptr.pp_double[ic+i+2][jc+j+3] = beta*c->ptr.pp_double[ic+i+2][jc+j+3]+alpha*v23; - c->ptr.pp_double[ic+i+3][jc+j+0] = beta*c->ptr.pp_double[ic+i+3][jc+j+0]+alpha*v30; - c->ptr.pp_double[ic+i+3][jc+j+1] = beta*c->ptr.pp_double[ic+i+3][jc+j+1]+alpha*v31; - c->ptr.pp_double[ic+i+3][jc+j+2] = beta*c->ptr.pp_double[ic+i+3][jc+j+2]+alpha*v32; - c->ptr.pp_double[ic+i+3][jc+j+3] = 
beta*c->ptr.pp_double[ic+i+3][jc+j+3]+alpha*v33; - } + break; } - else + a->ptr.p_int[p0] = at; + a->ptr.p_int[p1] = ak; + t = k; + } + } + for(i=n-1; i>=1; i--) + { + p0 = offset+0; + p1 = offset+i; + tmp = a->ptr.p_int[p1]; + a->ptr.p_int[p1] = a->ptr.p_int[p0]; + a->ptr.p_int[p0] = tmp; + at = tmp; + t = 0; + for(;;) + { + k = 2*t+1; + if( k+1>i ) { - - /* - * Determine submatrix [I0..I1]x[J0..J1] to process - */ - i0 = i; - i1 = ae_minint(i+3, m-1, _state); - j0 = j; - j1 = ae_minint(j+3, n-1, _state); - - /* - * Process submatrix - */ - for(ik=i0; ik<=i1; ik++) + break; + } + p0 = offset+t; + p1 = offset+k; + ak = a->ptr.p_int[p1]; + if( k+1ptr.p_int[p1+1]; + if( ak1>ak ) { - for(jk=j0; jk<=j1; jk++) - { - if( k==0||ae_fp_eq(alpha,(double)(0)) ) - { - v = (double)(0); - } - else - { - v = ae_v_dotproduct(&a->ptr.pp_double[ia+ik][ja], 1, &b->ptr.pp_double[ib+jk][jb], 1, ae_v_len(ja,ja+k-1)); - } - if( ae_fp_eq(beta,(double)(0)) ) - { - c->ptr.pp_double[ic+ik][jc+jk] = alpha*v; - } - else - { - c->ptr.pp_double[ic+ik][jc+jk] = beta*c->ptr.pp_double[ic+ik][jc+jk]+alpha*v; - } - } + ak = ak1; + p1 = p1+1; + k = k+1; } } - j = j+4; + if( at>=ak ) + { + break; + } + a->ptr.p_int[p1] = at; + a->ptr.p_int[p0] = ak; + t = k; } - i = i+4; } } /************************************************************************* -RMatrixGEMM kernel, basecase code for RMatrixGEMM, specialized for sitation -with OpTypeA=1 and OpTypeB=0. - -Additional info: -* this function requires that Alpha<>0 (assertion is thrown otherwise) +Heap operations: adds element to the heap -INPUT PARAMETERS - M - matrix size, M>0 - N - matrix size, N>0 - K - matrix size, K>0 - Alpha - coefficient - A - matrix - IA - submatrix offset - JA - submatrix offset - B - matrix - IB - submatrix offset - JB - submatrix offset - Beta - coefficient - C - PREALLOCATED output matrix - IC - submatrix offset - JC - submatrix offset +PARAMETERS: + A - heap itself, must be at least array[0..N] + B - array of integer tags, which are updated according to + permutations in the heap + N - size of the heap (without new element). 
+ updated on output + VA - value of the element being added + VB - value of the tag - -- ALGLIB routine -- - 27.03.2013 - Bochkanov Sergey + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void rmatrixgemmk44v10(ae_int_t m, - ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, +void tagheappushi(/* Real */ ae_vector* a, + /* Integer */ ae_vector* b, + ae_int_t* n, + double va, + ae_int_t vb, ae_state *_state) { - ae_int_t i; ae_int_t j; + ae_int_t k; double v; - double v00; - double v01; - double v02; - double v03; - double v10; - double v11; - double v12; - double v13; - double v20; - double v21; - double v22; - double v23; - double v30; - double v31; - double v32; - double v33; - double a0; - double a1; - double a2; - double a3; - double b0; - double b1; - double b2; - double b3; - ae_int_t idxa0; - ae_int_t idxa1; - ae_int_t idxa2; - ae_int_t idxa3; - ae_int_t idxb0; - ae_int_t idxb1; - ae_int_t idxb2; - ae_int_t idxb3; - ae_int_t i0; - ae_int_t i1; - ae_int_t ik; - ae_int_t j0; - ae_int_t j1; - ae_int_t jk; - ae_int_t t; - ae_int_t offsa; - ae_int_t offsb; - ae_assert(ae_fp_neq(alpha,(double)(0)), "RMatrixGEMMK44V00: internal error (Alpha=0)", _state); - - /* - * if matrix size is zero - */ - if( m==0||n==0 ) + if( *n<0 ) { return; } /* - * A'*B + * N=0 is a special case */ - i = 0; - while(iptr.p_double[0] = va; + b->ptr.p_int[0] = vb; + *n = *n+1; + return; + } + + /* + * add current point to the heap + * (add to the bottom, then move up) + * + * we don't write point to the heap + * until its final position is determined + * (it allow us to reduce number of array access operations) + */ + j = *n; + *n = *n+1; + while(j>0) + { + k = (j-1)/2; + v = a->ptr.p_double[k]; + if( vptr.pp_double[offsa][idxa0]; - a1 = a->ptr.pp_double[offsa][idxa1]; - b0 = b->ptr.pp_double[offsb][idxb0]; - b1 = b->ptr.pp_double[offsb][idxb1]; - v00 = v00+a0*b0; - v01 = v01+a0*b1; - v10 = v10+a1*b0; - v11 = v11+a1*b1; - a2 = a->ptr.pp_double[offsa][idxa2]; - a3 = a->ptr.pp_double[offsa][idxa3]; - v20 = v20+a2*b0; - v21 = v21+a2*b1; - v30 = v30+a3*b0; - v31 = v31+a3*b1; - b2 = b->ptr.pp_double[offsb][idxb2]; - b3 = b->ptr.pp_double[offsb][idxb3]; - v22 = v22+a2*b2; - v23 = v23+a2*b3; - v32 = v32+a3*b2; - v33 = v33+a3*b3; - v02 = v02+a0*b2; - v03 = v03+a0*b3; - v12 = v12+a1*b2; - v13 = v13+a1*b3; - offsa = offsa+1; - offsb = offsb+1; - } - if( ae_fp_eq(beta,(double)(0)) ) - { - c->ptr.pp_double[ic+i+0][jc+j+0] = alpha*v00; - c->ptr.pp_double[ic+i+0][jc+j+1] = alpha*v01; - c->ptr.pp_double[ic+i+0][jc+j+2] = alpha*v02; - c->ptr.pp_double[ic+i+0][jc+j+3] = alpha*v03; - c->ptr.pp_double[ic+i+1][jc+j+0] = alpha*v10; - c->ptr.pp_double[ic+i+1][jc+j+1] = alpha*v11; - c->ptr.pp_double[ic+i+1][jc+j+2] = alpha*v12; - c->ptr.pp_double[ic+i+1][jc+j+3] = alpha*v13; - c->ptr.pp_double[ic+i+2][jc+j+0] = alpha*v20; - c->ptr.pp_double[ic+i+2][jc+j+1] = alpha*v21; - c->ptr.pp_double[ic+i+2][jc+j+2] = alpha*v22; - c->ptr.pp_double[ic+i+2][jc+j+3] = alpha*v23; - c->ptr.pp_double[ic+i+3][jc+j+0] = alpha*v30; - c->ptr.pp_double[ic+i+3][jc+j+1] = alpha*v31; - c->ptr.pp_double[ic+i+3][jc+j+2] = alpha*v32; - c->ptr.pp_double[ic+i+3][jc+j+3] = alpha*v33; - } - else - { - c->ptr.pp_double[ic+i+0][jc+j+0] = beta*c->ptr.pp_double[ic+i+0][jc+j+0]+alpha*v00; - 
c->ptr.pp_double[ic+i+0][jc+j+1] = beta*c->ptr.pp_double[ic+i+0][jc+j+1]+alpha*v01; - c->ptr.pp_double[ic+i+0][jc+j+2] = beta*c->ptr.pp_double[ic+i+0][jc+j+2]+alpha*v02; - c->ptr.pp_double[ic+i+0][jc+j+3] = beta*c->ptr.pp_double[ic+i+0][jc+j+3]+alpha*v03; - c->ptr.pp_double[ic+i+1][jc+j+0] = beta*c->ptr.pp_double[ic+i+1][jc+j+0]+alpha*v10; - c->ptr.pp_double[ic+i+1][jc+j+1] = beta*c->ptr.pp_double[ic+i+1][jc+j+1]+alpha*v11; - c->ptr.pp_double[ic+i+1][jc+j+2] = beta*c->ptr.pp_double[ic+i+1][jc+j+2]+alpha*v12; - c->ptr.pp_double[ic+i+1][jc+j+3] = beta*c->ptr.pp_double[ic+i+1][jc+j+3]+alpha*v13; - c->ptr.pp_double[ic+i+2][jc+j+0] = beta*c->ptr.pp_double[ic+i+2][jc+j+0]+alpha*v20; - c->ptr.pp_double[ic+i+2][jc+j+1] = beta*c->ptr.pp_double[ic+i+2][jc+j+1]+alpha*v21; - c->ptr.pp_double[ic+i+2][jc+j+2] = beta*c->ptr.pp_double[ic+i+2][jc+j+2]+alpha*v22; - c->ptr.pp_double[ic+i+2][jc+j+3] = beta*c->ptr.pp_double[ic+i+2][jc+j+3]+alpha*v23; - c->ptr.pp_double[ic+i+3][jc+j+0] = beta*c->ptr.pp_double[ic+i+3][jc+j+0]+alpha*v30; - c->ptr.pp_double[ic+i+3][jc+j+1] = beta*c->ptr.pp_double[ic+i+3][jc+j+1]+alpha*v31; - c->ptr.pp_double[ic+i+3][jc+j+2] = beta*c->ptr.pp_double[ic+i+3][jc+j+2]+alpha*v32; - c->ptr.pp_double[ic+i+3][jc+j+3] = beta*c->ptr.pp_double[ic+i+3][jc+j+3]+alpha*v33; - } - } - else - { - - /* - * Determine submatrix [I0..I1]x[J0..J1] to process - */ - i0 = i; - i1 = ae_minint(i+3, m-1, _state); - j0 = j; - j1 = ae_minint(j+3, n-1, _state); - - /* - * Process submatrix - */ - for(ik=i0; ik<=i1; ik++) - { - for(jk=j0; jk<=j1; jk++) - { - if( k==0||ae_fp_eq(alpha,(double)(0)) ) - { - v = (double)(0); - } - else - { - v = 0.0; - v = ae_v_dotproduct(&a->ptr.pp_double[ia][ja+ik], a->stride, &b->ptr.pp_double[ib][jb+jk], b->stride, ae_v_len(ia,ia+k-1)); - } - if( ae_fp_eq(beta,(double)(0)) ) - { - c->ptr.pp_double[ic+ik][jc+jk] = alpha*v; - } - else - { - c->ptr.pp_double[ic+ik][jc+jk] = beta*c->ptr.pp_double[ic+ik][jc+jk]+alpha*v; - } - } - } - } - j = j+4; + a->ptr.p_double[j] = v; + b->ptr.p_int[j] = b->ptr.p_int[k]; + j = k; + } + else + { + + /* + * element in its place. terminate. + */ + break; } - i = i+4; } + a->ptr.p_double[j] = va; + b->ptr.p_int[j] = vb; } /************************************************************************* -RMatrixGEMM kernel, basecase code for RMatrixGEMM, specialized for sitation -with OpTypeA=1 and OpTypeB=1. 
- -Additional info: -* this function requires that Alpha<>0 (assertion is thrown otherwise) +Heap operations: replaces top element with new element +(which is moved down) -INPUT PARAMETERS - M - matrix size, M>0 - N - matrix size, N>0 - K - matrix size, K>0 - Alpha - coefficient - A - matrix - IA - submatrix offset - JA - submatrix offset - B - matrix - IB - submatrix offset - JB - submatrix offset - Beta - coefficient - C - PREALLOCATED output matrix - IC - submatrix offset - JC - submatrix offset +PARAMETERS: + A - heap itself, must be at least array[0..N-1] + B - array of integer tags, which are updated according to + permutations in the heap + N - size of the heap + VA - value of the element which replaces top element + VB - value of the tag - -- ALGLIB routine -- - 27.03.2013 - Bochkanov Sergey + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void rmatrixgemmk44v11(ae_int_t m, +void tagheapreplacetopi(/* Real */ ae_vector* a, + /* Integer */ ae_vector* b, ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, + double va, + ae_int_t vb, ae_state *_state) { - ae_int_t i; ae_int_t j; + ae_int_t k1; + ae_int_t k2; double v; - double v00; - double v01; - double v02; - double v03; - double v10; - double v11; - double v12; - double v13; - double v20; - double v21; - double v22; - double v23; - double v30; - double v31; - double v32; - double v33; - double a0; - double a1; - double a2; - double a3; - double b0; - double b1; - double b2; - double b3; - ae_int_t idxa0; - ae_int_t idxa1; - ae_int_t idxa2; - ae_int_t idxa3; - ae_int_t idxb0; - ae_int_t idxb1; - ae_int_t idxb2; - ae_int_t idxb3; - ae_int_t i0; - ae_int_t i1; - ae_int_t ik; - ae_int_t j0; - ae_int_t j1; - ae_int_t jk; - ae_int_t t; - ae_int_t offsa; - ae_int_t offsb; + double v1; + double v2; - ae_assert(ae_fp_neq(alpha,(double)(0)), "RMatrixGEMMK44V00: internal error (Alpha=0)", _state); + if( n<1 ) + { + return; + } /* - * if matrix size is zero + * N=1 is a special case */ - if( m==0||n==0 ) + if( n==1 ) { + a->ptr.p_double[0] = va; + b->ptr.p_int[0] = vb; return; } /* - * A'*B' + * move down through heap: + * * J - current element + * * K1 - first child (always exists) + * * K2 - second child (may not exists) + * + * we don't write point to the heap + * until its final position is determined + * (it allow us to reduce number of array access operations) */ - i = 0; - while(i=n ) { /* - * Choose between specialized 4x4 code and general code + * only one child. + * + * swap and terminate (because this child + * have no siblings due to heap structure) */ - if( i+4<=m&&j+4<=n ) + v = a->ptr.p_double[k1]; + if( v>va ) { - - /* - * Specialized 4x4 code for [I..I+3]x[J..J+3] submatrix of C. - * - * This submatrix is calculated as sum of K rank-1 products, - * with operands cached in local variables in order to speed - * up operations with arrays. 
- */ - idxa0 = ja+i+0; - idxa1 = ja+i+1; - idxa2 = ja+i+2; - idxa3 = ja+i+3; - offsa = ia; - idxb0 = ib+j+0; - idxb1 = ib+j+1; - idxb2 = ib+j+2; - idxb3 = ib+j+3; - offsb = jb; - v00 = 0.0; - v01 = 0.0; - v02 = 0.0; - v03 = 0.0; - v10 = 0.0; - v11 = 0.0; - v12 = 0.0; - v13 = 0.0; - v20 = 0.0; - v21 = 0.0; - v22 = 0.0; - v23 = 0.0; - v30 = 0.0; - v31 = 0.0; - v32 = 0.0; - v33 = 0.0; - for(t=0; t<=k-1; t++) - { - a0 = a->ptr.pp_double[offsa][idxa0]; - a1 = a->ptr.pp_double[offsa][idxa1]; - b0 = b->ptr.pp_double[idxb0][offsb]; - b1 = b->ptr.pp_double[idxb1][offsb]; - v00 = v00+a0*b0; - v01 = v01+a0*b1; - v10 = v10+a1*b0; - v11 = v11+a1*b1; - a2 = a->ptr.pp_double[offsa][idxa2]; - a3 = a->ptr.pp_double[offsa][idxa3]; - v20 = v20+a2*b0; - v21 = v21+a2*b1; - v30 = v30+a3*b0; - v31 = v31+a3*b1; - b2 = b->ptr.pp_double[idxb2][offsb]; - b3 = b->ptr.pp_double[idxb3][offsb]; - v22 = v22+a2*b2; - v23 = v23+a2*b3; - v32 = v32+a3*b2; - v33 = v33+a3*b3; - v02 = v02+a0*b2; - v03 = v03+a0*b3; - v12 = v12+a1*b2; - v13 = v13+a1*b3; - offsa = offsa+1; - offsb = offsb+1; - } - if( ae_fp_eq(beta,(double)(0)) ) + a->ptr.p_double[j] = v; + b->ptr.p_int[j] = b->ptr.p_int[k1]; + j = k1; + } + break; + } + else + { + + /* + * two childs + */ + v1 = a->ptr.p_double[k1]; + v2 = a->ptr.p_double[k2]; + if( v1>v2 ) + { + if( vaptr.pp_double[ic+i+0][jc+j+0] = alpha*v00; - c->ptr.pp_double[ic+i+0][jc+j+1] = alpha*v01; - c->ptr.pp_double[ic+i+0][jc+j+2] = alpha*v02; - c->ptr.pp_double[ic+i+0][jc+j+3] = alpha*v03; - c->ptr.pp_double[ic+i+1][jc+j+0] = alpha*v10; - c->ptr.pp_double[ic+i+1][jc+j+1] = alpha*v11; - c->ptr.pp_double[ic+i+1][jc+j+2] = alpha*v12; - c->ptr.pp_double[ic+i+1][jc+j+3] = alpha*v13; - c->ptr.pp_double[ic+i+2][jc+j+0] = alpha*v20; - c->ptr.pp_double[ic+i+2][jc+j+1] = alpha*v21; - c->ptr.pp_double[ic+i+2][jc+j+2] = alpha*v22; - c->ptr.pp_double[ic+i+2][jc+j+3] = alpha*v23; - c->ptr.pp_double[ic+i+3][jc+j+0] = alpha*v30; - c->ptr.pp_double[ic+i+3][jc+j+1] = alpha*v31; - c->ptr.pp_double[ic+i+3][jc+j+2] = alpha*v32; - c->ptr.pp_double[ic+i+3][jc+j+3] = alpha*v33; + a->ptr.p_double[j] = v1; + b->ptr.p_int[j] = b->ptr.p_int[k1]; + j = k1; } else { - c->ptr.pp_double[ic+i+0][jc+j+0] = beta*c->ptr.pp_double[ic+i+0][jc+j+0]+alpha*v00; - c->ptr.pp_double[ic+i+0][jc+j+1] = beta*c->ptr.pp_double[ic+i+0][jc+j+1]+alpha*v01; - c->ptr.pp_double[ic+i+0][jc+j+2] = beta*c->ptr.pp_double[ic+i+0][jc+j+2]+alpha*v02; - c->ptr.pp_double[ic+i+0][jc+j+3] = beta*c->ptr.pp_double[ic+i+0][jc+j+3]+alpha*v03; - c->ptr.pp_double[ic+i+1][jc+j+0] = beta*c->ptr.pp_double[ic+i+1][jc+j+0]+alpha*v10; - c->ptr.pp_double[ic+i+1][jc+j+1] = beta*c->ptr.pp_double[ic+i+1][jc+j+1]+alpha*v11; - c->ptr.pp_double[ic+i+1][jc+j+2] = beta*c->ptr.pp_double[ic+i+1][jc+j+2]+alpha*v12; - c->ptr.pp_double[ic+i+1][jc+j+3] = beta*c->ptr.pp_double[ic+i+1][jc+j+3]+alpha*v13; - c->ptr.pp_double[ic+i+2][jc+j+0] = beta*c->ptr.pp_double[ic+i+2][jc+j+0]+alpha*v20; - c->ptr.pp_double[ic+i+2][jc+j+1] = beta*c->ptr.pp_double[ic+i+2][jc+j+1]+alpha*v21; - c->ptr.pp_double[ic+i+2][jc+j+2] = beta*c->ptr.pp_double[ic+i+2][jc+j+2]+alpha*v22; - c->ptr.pp_double[ic+i+2][jc+j+3] = beta*c->ptr.pp_double[ic+i+2][jc+j+3]+alpha*v23; - c->ptr.pp_double[ic+i+3][jc+j+0] = beta*c->ptr.pp_double[ic+i+3][jc+j+0]+alpha*v30; - c->ptr.pp_double[ic+i+3][jc+j+1] = beta*c->ptr.pp_double[ic+i+3][jc+j+1]+alpha*v31; - c->ptr.pp_double[ic+i+3][jc+j+2] = beta*c->ptr.pp_double[ic+i+3][jc+j+2]+alpha*v32; - c->ptr.pp_double[ic+i+3][jc+j+3] = beta*c->ptr.pp_double[ic+i+3][jc+j+3]+alpha*v33; + break; } } 
else { - - /* - * Determine submatrix [I0..I1]x[J0..J1] to process - */ - i0 = i; - i1 = ae_minint(i+3, m-1, _state); - j0 = j; - j1 = ae_minint(j+3, n-1, _state); - - /* - * Process submatrix - */ - for(ik=i0; ik<=i1; ik++) + if( vaptr.pp_double[ia][ja+ik], a->stride, &b->ptr.pp_double[ib+jk][jb], 1, ae_v_len(ia,ia+k-1)); - } - if( ae_fp_eq(beta,(double)(0)) ) - { - c->ptr.pp_double[ic+ik][jc+jk] = alpha*v; - } - else - { - c->ptr.pp_double[ic+ik][jc+jk] = beta*c->ptr.pp_double[ic+ik][jc+jk]+alpha*v; - } - } + a->ptr.p_double[j] = v2; + b->ptr.p_int[j] = b->ptr.p_int[k2]; + j = k2; + } + else + { + break; } } - j = j+4; + k1 = 2*j+1; + k2 = 2*j+2; } - i = i+4; } + a->ptr.p_double[j] = va; + b->ptr.p_int[j] = vb; } +/************************************************************************* +Heap operations: pops top element from the heap +PARAMETERS: + A - heap itself, must be at least array[0..N-1] + B - array of integer tags, which are updated according to + permutations in the heap + N - size of the heap, N>=1 -/************************************************************************* -MKL-based kernel +On output top element is moved to A[N-1], B[N-1], heap is reordered, N is +decreased by 1. - -- ALGLIB routine -- - 01.10.2013 - Bochkanov Sergey + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -ae_bool rmatrixsyrkmkl(ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_bool isupper, +void tagheappopi(/* Real */ ae_vector* a, + /* Integer */ ae_vector* b, + ae_int_t* n, ae_state *_state) { -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; + double va; + ae_int_t vb; - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixsyrkmkl(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); -#endif + if( *n<1 ) + { + return; + } + + /* + * N=1 is a special case + */ + if( *n==1 ) + { + *n = 0; + return; + } + + /* + * swap top element and last element, + * then reorder heap + */ + va = a->ptr.p_double[*n-1]; + vb = b->ptr.p_int[*n-1]; + a->ptr.p_double[*n-1] = a->ptr.p_double[0]; + b->ptr.p_int[*n-1] = b->ptr.p_int[0]; + *n = *n-1; + tagheapreplacetopi(a, b, *n, va, vb, _state); } /************************************************************************* -MKL-based kernel - - -- ALGLIB routine -- - 01.10.2013 - Bochkanov Sergey -*************************************************************************/ -ae_bool cmatrixherkmkl(ae_int_t n, - ae_int_t k, - double alpha, - /* Complex */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - double beta, - /* Complex */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_bool isupper, - ae_state *_state) -{ -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_cmatrixherkmkl(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); -#endif -} - +Search first element less than T in sorted array. -/************************************************************************* -MKL-based kernel +PARAMETERS: + A - sorted array by ascending from 0 to N-1 + N - number of elements in array + T - the desired element - -- ALGLIB routine -- - 01.10.2013 - Bochkanov Sergey +RESULT: + The very first element's index, which isn't less than T. +In the case when there aren't such elements, returns N. 
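Before the search helpers are defined, it is worth noting what the two heap routines introduced above actually maintain: tagheapreplacetopi and tagheappopi operate on a binary max-heap, so the largest value sits at index 0, a replacement value is sifted down by repeatedly promoting the larger child, and the integer tags in B are permuted in lockstep. A minimal stand-alone sketch of that sift-down on plain C arrays rather than ALGLIB's ae_vector containers (helper name and array layout are illustrative only):

    #include <stddef.h>

    /* Sift a replacement value down from the root of a binary max-heap
       a[0..n-1], moving the matching integer tags in b[] the same way.
       As in tagheapreplacetopi, the new value is written exactly once,
       at its final position. */
    static void sift_down_max(double *a, int *b, size_t n, double va, int vb)
    {
        size_t j = 0;
        while (2*j+1 < n)
        {
            size_t k = 2*j+1;                 /* first child              */
            if (k+1 < n && a[k+1] > a[k])
                k++;                          /* pick the larger child    */
            if (a[k] <= va)
                break;                        /* heap property holds here */
            a[j] = a[k];                      /* promote the child        */
            b[j] = b[k];
            j = k;
        }
        a[j] = va;
        b[j] = vb;
    }

tagheappopi uses exactly this operation: it saves A[N-1]/B[N-1], moves the top element into position N-1, shrinks the heap by one, and sifts the saved value down from the root, so popping repeatedly leaves A sorted in ascending order with the tags rearranged accordingly.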
*************************************************************************/ -ae_bool rmatrixgemmmkl(ae_int_t m, +ae_int_t lowerbound(/* Real */ ae_vector* a, ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, + double t, ae_state *_state) { -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; + ae_int_t l; + ae_int_t half; + ae_int_t first; + ae_int_t middle; + ae_int_t result; - result = ae_false; + l = n; + first = 0; + while(l>0) + { + half = l/2; + middle = first+half; + if( ae_fp_less(a->ptr.p_double[middle],t) ) + { + first = middle+1; + l = l-half-1; + } + else + { + l = half; + } + } + result = first; return result; -#else - return _ialglib_i_rmatrixgemmmkl(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); -#endif } /************************************************************************* -MKL-based kernel - - -- ALGLIB routine -- - 16.10.2014 - Bochkanov Sergey -*************************************************************************/ -ae_bool cmatrixgemmmkl(ae_int_t m, - ae_int_t n, - ae_int_t k, - ae_complex alpha, - /* Complex */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - /* Complex */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - ae_complex beta, - /* Complex */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_state *_state) -{ -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_cmatrixgemmmkl(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); -#endif -} - +Search first element more than T in sorted array. -/************************************************************************* -MKL-based kernel +PARAMETERS: + A - sorted array by ascending from 0 to N-1 + N - number of elements in array + T - the desired element - -- ALGLIB routine -- - 16.10.2014 - Bochkanov Sergey + RESULT: + The very first element's index, which more than T. +In the case when there aren't such elements, returns N. *************************************************************************/ -ae_bool cmatrixlefttrsmmkl(ae_int_t m, +ae_int_t upperbound(/* Real */ ae_vector* a, ae_int_t n, - /* Complex */ ae_matrix* a, - ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Complex */ ae_matrix* x, - ae_int_t i2, - ae_int_t j2, + double t, ae_state *_state) { -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; + ae_int_t l; + ae_int_t half; + ae_int_t first; + ae_int_t middle; + ae_int_t result; - result = ae_false; + l = n; + first = 0; + while(l>0) + { + half = l/2; + middle = first+half; + if( ae_fp_less(t,a->ptr.p_double[middle]) ) + { + l = half; + } + else + { + first = middle+1; + l = l-half-1; + } + } + result = first; return result; -#else - return _ialglib_i_cmatrixlefttrsmmkl(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); -#endif } /************************************************************************* -MKL-based kernel +Internal TagSortFastI: sorts A[I1...I2] (both bounds are included), +applies same permutations to B. 
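Despite their headings, the two searches above have the conventional lower_bound/upper_bound semantics: lowerbound returns the index of the first element that is not less than T (i.e. >= T), upperbound returns the index of the first element strictly greater than T, and both return N when no such element exists, so together they delimit the half-open range of entries equal to T. A small self-contained illustration on a plain sorted double array (the helper names are hypothetical, not ALGLIB symbols):

    #include <stdio.h>

    /* Plain-C equivalents of lowerbound/upperbound on a sorted array. */
    static int lower_bound_d(const double *a, int n, double t)
    {
        int first = 0, len = n;
        while (len > 0)
        {
            int half = len/2, mid = first+half;
            if (a[mid] < t) { first = mid+1; len -= half+1; }
            else            { len = half; }
        }
        return first;                 /* first index with a[i] >= t, or n */
    }

    static int upper_bound_d(const double *a, int n, double t)
    {
        int first = 0, len = n;
        while (len > 0)
        {
            int half = len/2, mid = first+half;
            if (t < a[mid]) { len = half; }
            else            { first = mid+1; len -= half+1; }
        }
        return first;                 /* first index with a[i] > t, or n */
    }

    int main(void)
    {
        double a[] = {1.0, 2.0, 2.0, 2.0, 5.0};
        double t = 2.0;
        int lo = lower_bound_d(a, 5, t), hi = upper_bound_d(a, 5, t);
        printf("%d occurrences of %.1f in [%d,%d)\n", hi-lo, t, lo, hi);  /* 3 in [1,4) */
        return 0;
    }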
- -- ALGLIB routine -- - 16.10.2014 - Bochkanov Sergey + -- ALGLIB -- + Copyright 06.09.2010 by Bochkanov Sergey *************************************************************************/ -ae_bool cmatrixrighttrsmmkl(ae_int_t m, - ae_int_t n, - /* Complex */ ae_matrix* a, +static void tsort_tagsortfastirec(/* Real */ ae_vector* a, + /* Integer */ ae_vector* b, + /* Real */ ae_vector* bufa, + /* Integer */ ae_vector* bufb, ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Complex */ ae_matrix* x, ae_int_t i2, - ae_int_t j2, ae_state *_state) { -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_cmatrixrighttrsmmkl(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); -#endif -} - + ae_int_t i; + ae_int_t j; + ae_int_t k; + ae_int_t cntless; + ae_int_t cnteq; + ae_int_t cntgreater; + double tmpr; + ae_int_t tmpi; + double v0; + double v1; + double v2; + double vp; -/************************************************************************* -MKL-based kernel - -- ALGLIB routine -- - 16.10.2014 - Bochkanov Sergey -*************************************************************************/ -ae_bool rmatrixlefttrsmmkl(ae_int_t m, - ae_int_t n, - /* Real */ ae_matrix* a, - ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Real */ ae_matrix* x, - ae_int_t i2, - ae_int_t j2, - ae_state *_state) -{ -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixlefttrsmmkl(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); -#endif + + /* + * Fast exit + */ + if( i2<=i1 ) + { + return; + } + + /* + * Non-recursive sort for small arrays + */ + if( i2-i1<=16 ) + { + for(j=i1+1; j<=i2; j++) + { + + /* + * Search elements [I1..J-1] for place to insert Jth element. 
+ * + * This code stops immediately if we can leave A[J] at J-th position + * (all elements have same value of A[J] larger than any of them) + */ + tmpr = a->ptr.p_double[j]; + tmpi = j; + for(k=j-1; k>=i1; k--) + { + if( a->ptr.p_double[k]<=tmpr ) + { + break; + } + tmpi = k; + } + k = tmpi; + + /* + * Insert Jth element into Kth position + */ + if( k!=j ) + { + tmpr = a->ptr.p_double[j]; + tmpi = b->ptr.p_int[j]; + for(i=j-1; i>=k; i--) + { + a->ptr.p_double[i+1] = a->ptr.p_double[i]; + b->ptr.p_int[i+1] = b->ptr.p_int[i]; + } + a->ptr.p_double[k] = tmpr; + b->ptr.p_int[k] = tmpi; + } + } + return; + } + + /* + * Quicksort: choose pivot + * Here we assume that I2-I1>=2 + */ + v0 = a->ptr.p_double[i1]; + v1 = a->ptr.p_double[i1+(i2-i1)/2]; + v2 = a->ptr.p_double[i2]; + if( v0>v1 ) + { + tmpr = v1; + v1 = v0; + v0 = tmpr; + } + if( v1>v2 ) + { + tmpr = v2; + v2 = v1; + v1 = tmpr; + } + if( v0>v1 ) + { + tmpr = v1; + v1 = v0; + v0 = tmpr; + } + vp = v1; + + /* + * now pass through A/B and: + * * move elements that are LESS than VP to the left of A/B + * * move elements that are EQUAL to VP to the right of BufA/BufB (in the reverse order) + * * move elements that are GREATER than VP to the left of BufA/BufB (in the normal order + * * move elements from the tail of BufA/BufB to the middle of A/B (restoring normal order) + * * move elements from the left of BufA/BufB to the end of A/B + */ + cntless = 0; + cnteq = 0; + cntgreater = 0; + for(i=i1; i<=i2; i++) + { + v0 = a->ptr.p_double[i]; + if( v0ptr.p_double[k] = v0; + b->ptr.p_int[k] = b->ptr.p_int[i]; + } + cntless = cntless+1; + continue; + } + if( v0==vp ) + { + + /* + * EQUAL + */ + k = i2-cnteq; + bufa->ptr.p_double[k] = v0; + bufb->ptr.p_int[k] = b->ptr.p_int[i]; + cnteq = cnteq+1; + continue; + } + + /* + * GREATER + */ + k = i1+cntgreater; + bufa->ptr.p_double[k] = v0; + bufb->ptr.p_int[k] = b->ptr.p_int[i]; + cntgreater = cntgreater+1; + } + for(i=0; i<=cnteq-1; i++) + { + j = i1+cntless+cnteq-1-i; + k = i2+i-(cnteq-1); + a->ptr.p_double[j] = bufa->ptr.p_double[k]; + b->ptr.p_int[j] = bufb->ptr.p_int[k]; + } + for(i=0; i<=cntgreater-1; i++) + { + j = i1+cntless+cnteq+i; + k = i1+i; + a->ptr.p_double[j] = bufa->ptr.p_double[k]; + b->ptr.p_int[j] = bufb->ptr.p_int[k]; + } + + /* + * Sort left and right parts of the array (ignoring middle part) + */ + tsort_tagsortfastirec(a, b, bufa, bufb, i1, i1+cntless-1, _state); + tsort_tagsortfastirec(a, b, bufa, bufb, i1+cntless+cnteq, i2, _state); } /************************************************************************* -MKL-based kernel +Internal TagSortFastR: sorts A[I1...I2] (both bounds are included), +applies same permutations to B. 
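The recursive sorter above is a buffered three-way quicksort: after an insertion-sort cutoff for short ranges and a median-of-three pivot choice, elements smaller than the pivot are compacted to the front of A in place, elements equal to and greater than the pivot are parked in the scratch arrays and copied back behind the smaller block, the tag array B receives the same permutation, and only the strictly-smaller and strictly-greater ranges are recursed into. The partition step alone, rewritten as a sketch over plain C arrays (bufa/bufb are caller-provided scratch of the same length as a/b; names are illustrative):

    /* Three-way partition of a[i1..i2] (tags in b[]) around pivot value vp,
       using scratch arrays bufa/bufb indexed over the same range.
       On return, a[i1..i1+*nless-1] < vp, the next *neq entries equal vp,
       and the remainder is > vp; b[] is permuted identically. */
    static void partition3(double *a, int *b, double *bufa, int *bufb,
                           int i1, int i2, double vp, int *nless, int *neq)
    {
        int cl = 0, ce = 0, cg = 0, i;
        for (i = i1; i <= i2; i++)
        {
            double v = a[i];
            if (v < vp)       { a[i1+cl] = v;    b[i1+cl] = b[i];    cl++; }  /* in place   */
            else if (v == vp) { bufa[i2-ce] = v; bufb[i2-ce] = b[i]; ce++; }  /* park right */
            else              { bufa[i1+cg] = v; bufb[i1+cg] = b[i]; cg++; }  /* park left  */
        }
        for (i = 0; i < ce; i++)      /* copy the "equal" block back in normal order */
        {
            a[i1+cl+ce-1-i] = bufa[i2-(ce-1)+i];
            b[i1+cl+ce-1-i] = bufb[i2-(ce-1)+i];
        }
        for (i = 0; i < cg; i++)      /* then the "greater" block */
        {
            a[i1+cl+ce+i] = bufa[i1+i];
            b[i1+cl+ce+i] = bufb[i1+i];
        }
        *nless = cl;
        *neq = ce;
    }

Recursing only into the strictly-less and strictly-greater blocks keeps long runs of equal keys from degrading the sort towards quadratic time.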
- -- ALGLIB routine -- - 16.10.2014 - Bochkanov Sergey + -- ALGLIB -- + Copyright 06.09.2010 by Bochkanov Sergey *************************************************************************/ -ae_bool rmatrixrighttrsmmkl(ae_int_t m, - ae_int_t n, - /* Real */ ae_matrix* a, +static void tsort_tagsortfastrrec(/* Real */ ae_vector* a, + /* Real */ ae_vector* b, + /* Real */ ae_vector* bufa, + /* Real */ ae_vector* bufb, ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Real */ ae_matrix* x, ae_int_t i2, - ae_int_t j2, - ae_state *_state) -{ -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixrighttrsmmkl(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); -#endif -} - - -/************************************************************************* -MKL-based kernel. - -NOTE: - -if function returned False, CholResult is NOT modified. Not ever referenced! -if function returned True, CholResult is set to status of Cholesky decomposition -(True on succeess). - - -- ALGLIB routine -- - 16.10.2014 - Bochkanov Sergey -*************************************************************************/ -ae_bool spdmatrixcholeskymkl(/* Real */ ae_matrix* a, - ae_int_t offs, - ae_int_t n, - ae_bool isupper, - ae_bool* cholresult, ae_state *_state) { -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_spdmatrixcholeskymkl(a, offs, n, isupper, cholresult); -#endif -} - + ae_int_t i; + ae_int_t j; + ae_int_t k; + double tmpr; + double tmpr2; + ae_int_t tmpi; + ae_int_t cntless; + ae_int_t cnteq; + ae_int_t cntgreater; + double v0; + double v1; + double v2; + double vp; -/************************************************************************* -MKL-based kernel. - -- ALGLIB routine -- - 20.10.2014 - Bochkanov Sergey -*************************************************************************/ -ae_bool rmatrixplumkl(/* Real */ ae_matrix* a, - ae_int_t offs, - ae_int_t m, - ae_int_t n, - /* Integer */ ae_vector* pivots, - ae_state *_state) -{ -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixplumkl(a, offs, m, n, pivots); -#endif -} - - -/************************************************************************* -MKL-based kernel. - -NOTE: this function needs preallocated output/temporary arrays. - D and E must be at least max(M,N)-wide. - - -- ALGLIB routine -- - 20.10.2014 - Bochkanov Sergey -*************************************************************************/ -ae_bool rmatrixbdmkl(/* Real */ ae_matrix* a, - ae_int_t m, - ae_int_t n, - /* Real */ ae_vector* d, - /* Real */ ae_vector* e, - /* Real */ ae_vector* tauq, - /* Real */ ae_vector* taup, - ae_state *_state) -{ -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixbdmkl(a, m, n, d, e, tauq, taup); -#endif + + /* + * Fast exit + */ + if( i2<=i1 ) + { + return; + } + + /* + * Non-recursive sort for small arrays + */ + if( i2-i1<=16 ) + { + for(j=i1+1; j<=i2; j++) + { + + /* + * Search elements [I1..J-1] for place to insert Jth element. 
+ * + * This code stops immediatly if we can leave A[J] at J-th position + * (all elements have same value of A[J] larger than any of them) + */ + tmpr = a->ptr.p_double[j]; + tmpi = j; + for(k=j-1; k>=i1; k--) + { + if( a->ptr.p_double[k]<=tmpr ) + { + break; + } + tmpi = k; + } + k = tmpi; + + /* + * Insert Jth element into Kth position + */ + if( k!=j ) + { + tmpr = a->ptr.p_double[j]; + tmpr2 = b->ptr.p_double[j]; + for(i=j-1; i>=k; i--) + { + a->ptr.p_double[i+1] = a->ptr.p_double[i]; + b->ptr.p_double[i+1] = b->ptr.p_double[i]; + } + a->ptr.p_double[k] = tmpr; + b->ptr.p_double[k] = tmpr2; + } + } + return; + } + + /* + * Quicksort: choose pivot + * Here we assume that I2-I1>=16 + */ + v0 = a->ptr.p_double[i1]; + v1 = a->ptr.p_double[i1+(i2-i1)/2]; + v2 = a->ptr.p_double[i2]; + if( v0>v1 ) + { + tmpr = v1; + v1 = v0; + v0 = tmpr; + } + if( v1>v2 ) + { + tmpr = v2; + v2 = v1; + v1 = tmpr; + } + if( v0>v1 ) + { + tmpr = v1; + v1 = v0; + v0 = tmpr; + } + vp = v1; + + /* + * now pass through A/B and: + * * move elements that are LESS than VP to the left of A/B + * * move elements that are EQUAL to VP to the right of BufA/BufB (in the reverse order) + * * move elements that are GREATER than VP to the left of BufA/BufB (in the normal order + * * move elements from the tail of BufA/BufB to the middle of A/B (restoring normal order) + * * move elements from the left of BufA/BufB to the end of A/B + */ + cntless = 0; + cnteq = 0; + cntgreater = 0; + for(i=i1; i<=i2; i++) + { + v0 = a->ptr.p_double[i]; + if( v0ptr.p_double[k] = v0; + b->ptr.p_double[k] = b->ptr.p_double[i]; + } + cntless = cntless+1; + continue; + } + if( v0==vp ) + { + + /* + * EQUAL + */ + k = i2-cnteq; + bufa->ptr.p_double[k] = v0; + bufb->ptr.p_double[k] = b->ptr.p_double[i]; + cnteq = cnteq+1; + continue; + } + + /* + * GREATER + */ + k = i1+cntgreater; + bufa->ptr.p_double[k] = v0; + bufb->ptr.p_double[k] = b->ptr.p_double[i]; + cntgreater = cntgreater+1; + } + for(i=0; i<=cnteq-1; i++) + { + j = i1+cntless+cnteq-1-i; + k = i2+i-(cnteq-1); + a->ptr.p_double[j] = bufa->ptr.p_double[k]; + b->ptr.p_double[j] = bufb->ptr.p_double[k]; + } + for(i=0; i<=cntgreater-1; i++) + { + j = i1+cntless+cnteq+i; + k = i1+i; + a->ptr.p_double[j] = bufa->ptr.p_double[k]; + b->ptr.p_double[j] = bufb->ptr.p_double[k]; + } + + /* + * Sort left and right parts of the array (ignoring middle part) + */ + tsort_tagsortfastrrec(a, b, bufa, bufb, i1, i1+cntless-1, _state); + tsort_tagsortfastrrec(a, b, bufa, bufb, i1+cntless+cnteq, i2, _state); } /************************************************************************* -MKL-based kernel. - -If ByQ is True, TauP is not used (can be empty array). -If ByQ is False, TauQ is not used (can be empty array). +Internal TagSortFastI: sorts A[I1...I2] (both bounds are included), +applies same permutations to B. 
- -- ALGLIB routine -- - 20.10.2014 - Bochkanov Sergey + -- ALGLIB -- + Copyright 06.09.2010 by Bochkanov Sergey *************************************************************************/ -ae_bool rmatrixbdmultiplybymkl(/* Real */ ae_matrix* qp, - ae_int_t m, - ae_int_t n, - /* Real */ ae_vector* tauq, - /* Real */ ae_vector* taup, - /* Real */ ae_matrix* z, - ae_int_t zrows, - ae_int_t zcolumns, - ae_bool byq, - ae_bool fromtheright, - ae_bool dotranspose, +static void tsort_tagsortfastrec(/* Real */ ae_vector* a, + /* Real */ ae_vector* bufa, + ae_int_t i1, + ae_int_t i2, ae_state *_state) { -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixbdmultiplybymkl(qp, m, n, tauq, taup, z, zrows, zcolumns, byq, fromtheright, dotranspose); -#endif -} - - -/************************************************************************* -MKL-based kernel. + ae_int_t cntless; + ae_int_t cnteq; + ae_int_t cntgreater; + ae_int_t i; + ae_int_t j; + ae_int_t k; + double tmpr; + ae_int_t tmpi; + double v0; + double v1; + double v2; + double vp; -NOTE: Tau must be preallocated array with at least N-1 elements. - -- ALGLIB routine -- - 20.10.2014 - Bochkanov Sergey -*************************************************************************/ -ae_bool rmatrixhessenbergmkl(/* Real */ ae_matrix* a, - ae_int_t n, - /* Real */ ae_vector* tau, - ae_state *_state) -{ -#ifndef ALGLIB_INTERCEPTS_MKL - ae_bool result; + + /* + * Fast exit + */ + if( i2<=i1 ) + { + return; + } + + /* + * Non-recursive sort for small arrays + */ + if( i2-i1<=16 ) + { + for(j=i1+1; j<=i2; j++) + { + + /* + * Search elements [I1..J-1] for place to insert Jth element. + * + * This code stops immediatly if we can leave A[J] at J-th position + * (all elements have same value of A[J] larger than any of them) + */ + tmpr = a->ptr.p_double[j]; + tmpi = j; + for(k=j-1; k>=i1; k--) + { + if( a->ptr.p_double[k]<=tmpr ) + { + break; + } + tmpi = k; + } + k = tmpi; + + /* + * Insert Jth element into Kth position + */ + if( k!=j ) + { + tmpr = a->ptr.p_double[j]; + for(i=j-1; i>=k; i--) + { + a->ptr.p_double[i+1] = a->ptr.p_double[i]; + } + a->ptr.p_double[k] = tmpr; + } + } + return; + } + + /* + * Quicksort: choose pivot + * Here we assume that I2-I1>=16 + */ + v0 = a->ptr.p_double[i1]; + v1 = a->ptr.p_double[i1+(i2-i1)/2]; + v2 = a->ptr.p_double[i2]; + if( v0>v1 ) + { + tmpr = v1; + v1 = v0; + v0 = tmpr; + } + if( v1>v2 ) + { + tmpr = v2; + v2 = v1; + v1 = tmpr; + } + if( v0>v1 ) + { + tmpr = v1; + v1 = v0; + v0 = tmpr; + } + vp = v1; + + /* + * now pass through A/B and: + * * move elements that are LESS than VP to the left of A/B + * * move elements that are EQUAL to VP to the right of BufA/BufB (in the reverse order) + * * move elements that are GREATER than VP to the left of BufA/BufB (in the normal order + * * move elements from the tail of BufA/BufB to the middle of A/B (restoring normal order) + * * move elements from the left of BufA/BufB to the end of A/B + */ + cntless = 0; + cnteq = 0; + cntgreater = 0; + for(i=i1; i<=i2; i++) + { + v0 = a->ptr.p_double[i]; + if( v0ptr.p_double[k] = v0; + } + cntless = cntless+1; + continue; + } + if( v0==vp ) + { + + /* + * EQUAL + */ + k = i2-cnteq; + bufa->ptr.p_double[k] = v0; + cnteq = cnteq+1; + continue; + } + + /* + * GREATER + */ + k = i1+cntgreater; + bufa->ptr.p_double[k] = v0; + cntgreater = cntgreater+1; + } + for(i=0; i<=cnteq-1; i++) + { + j = i1+cntless+cnteq-1-i; + k = i2+i-(cnteq-1); + a->ptr.p_double[j] = 
bufa->ptr.p_double[k]; + } + for(i=0; i<=cntgreater-1; i++) + { + j = i1+cntless+cnteq+i; + k = i1+i; + a->ptr.p_double[j] = bufa->ptr.p_double[k]; + } + + /* + * Sort left and right parts of the array (ignoring middle part) + */ + tsort_tagsortfastrec(a, bufa, i1, i1+cntless-1, _state); + tsort_tagsortfastrec(a, bufa, i1+cntless+cnteq, i2, _state); +} - result = ae_false; - return result; -#else - return _ialglib_i_rmatrixhessenbergmkl(a, n, tau); #endif -} +#if defined(AE_COMPILE_ABLASMKL) || !defined(AE_PARTIAL_BUILD) /************************************************************************* -MKL-based kernel. - -NOTE: Q must be preallocated N*N array +MKL-based kernel -- ALGLIB routine -- - 20.10.2014 + 12.10.2017 Bochkanov Sergey *************************************************************************/ -ae_bool rmatrixhessenbergunpackqmkl(/* Real */ ae_matrix* a, +ae_bool rmatrixgermkl(ae_int_t m, ae_int_t n, - /* Real */ ae_vector* tau, - /* Real */ ae_matrix* q, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + double alpha, + /* Real */ ae_vector* u, + ae_int_t iu, + /* Real */ ae_vector* v, + ae_int_t iv, ae_state *_state) { #ifndef ALGLIB_INTERCEPTS_MKL @@ -6515,28 +5621,27 @@ result = ae_false; return result; #else - return _ialglib_i_rmatrixhessenbergunpackqmkl(a, n, tau, q); + return _ialglib_i_rmatrixgermkl(m, n, a, ia, ja, alpha, u, iu, v, iv); #endif } /************************************************************************* -MKL-based kernel. - -NOTE: Tau, D, E must be preallocated arrays; - length(E)=length(Tau)=N-1 (or larger) - length(D)=N (or larger) +MKL-based kernel -- ALGLIB routine -- - 20.10.2014 + 12.10.2017 Bochkanov Sergey *************************************************************************/ -ae_bool smatrixtdmkl(/* Real */ ae_matrix* a, +ae_bool cmatrixrank1mkl(ae_int_t m, ae_int_t n, - ae_bool isupper, - /* Real */ ae_vector* tau, - /* Real */ ae_vector* d, - /* Real */ ae_vector* e, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Complex */ ae_vector* u, + ae_int_t iu, + /* Complex */ ae_vector* v, + ae_int_t iv, ae_state *_state) { #ifndef ALGLIB_INTERCEPTS_MKL @@ -6546,25 +5651,27 @@ result = ae_false; return result; #else - return _ialglib_i_smatrixtdmkl(a, n, isupper, tau, d, e); + return _ialglib_i_cmatrixrank1mkl(m, n, a, ia, ja, u, iu, v, iv); #endif } /************************************************************************* -MKL-based kernel. - -NOTE: Q must be preallocated N*N array +MKL-based kernel -- ALGLIB routine -- - 20.10.2014 + 12.10.2017 Bochkanov Sergey *************************************************************************/ -ae_bool smatrixtdunpackqmkl(/* Real */ ae_matrix* a, +ae_bool rmatrixrank1mkl(ae_int_t m, ae_int_t n, - ae_bool isupper, - /* Real */ ae_vector* tau, - /* Real */ ae_matrix* q, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_vector* u, + ae_int_t iu, + /* Real */ ae_vector* v, + ae_int_t iv, ae_state *_state) { #ifndef ALGLIB_INTERCEPTS_MKL @@ -6574,28 +5681,28 @@ result = ae_false; return result; #else - return _ialglib_i_smatrixtdunpackqmkl(a, n, isupper, tau, q); + return _ialglib_i_rmatrixrank1mkl(m, n, a, ia, ja, u, iu, v, iv); #endif } /************************************************************************* -MKL-based kernel. 
- -NOTE: Tau, D, E must be preallocated arrays; - length(E)=length(Tau)=N-1 (or larger) - length(D)=N (or larger) +MKL-based kernel -- ALGLIB routine -- - 20.10.2014 + 12.10.2017 Bochkanov Sergey *************************************************************************/ -ae_bool hmatrixtdmkl(/* Complex */ ae_matrix* a, +ae_bool cmatrixmvmkl(ae_int_t m, ae_int_t n, - ae_bool isupper, - /* Complex */ ae_vector* tau, - /* Real */ ae_vector* d, - /* Real */ ae_vector* e, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t opa, + /* Complex */ ae_vector* x, + ae_int_t ix, + /* Complex */ ae_vector* y, + ae_int_t iy, ae_state *_state) { #ifndef ALGLIB_INTERCEPTS_MKL @@ -6605,25 +5712,28 @@ result = ae_false; return result; #else - return _ialglib_i_hmatrixtdmkl(a, n, isupper, tau, d, e); + return _ialglib_i_cmatrixmvmkl(m, n, a, ia, ja, opa, x, ix, y, iy); #endif } /************************************************************************* -MKL-based kernel. - -NOTE: Q must be preallocated N*N array +MKL-based kernel -- ALGLIB routine -- - 20.10.2014 + 12.10.2017 Bochkanov Sergey *************************************************************************/ -ae_bool hmatrixtdunpackqmkl(/* Complex */ ae_matrix* a, +ae_bool rmatrixmvmkl(ae_int_t m, ae_int_t n, - ae_bool isupper, - /* Complex */ ae_vector* tau, - /* Complex */ ae_matrix* q, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t opa, + /* Real */ ae_vector* x, + ae_int_t ix, + /* Real */ ae_vector* y, + ae_int_t iy, ae_state *_state) { #ifndef ALGLIB_INTERCEPTS_MKL @@ -6633,37 +5743,30 @@ result = ae_false; return result; #else - return _ialglib_i_hmatrixtdunpackqmkl(a, n, isupper, tau, q); + return _ialglib_i_rmatrixmvmkl(m, n, a, ia, ja, opa, x, ix, y, iy); #endif } /************************************************************************* -MKL-based kernel. - -Returns True if MKL was present and handled request (MKL completion code -is returned as separate output parameter). - -D and E are pre-allocated arrays with length N (both of them!). On output, -D constraints singular values, and E is destroyed. - -SVDResult is modified if and only if MKL is present. +MKL-based kernel -- ALGLIB routine -- - 20.10.2014 + 12.10.2017 Bochkanov Sergey *************************************************************************/ -ae_bool rmatrixbdsvdmkl(/* Real */ ae_vector* d, - /* Real */ ae_vector* e, +ae_bool rmatrixgemvmkl(ae_int_t m, ae_int_t n, - ae_bool isupper, - /* Real */ ae_matrix* u, - ae_int_t nru, - /* Real */ ae_matrix* c, - ae_int_t ncc, - /* Real */ ae_matrix* vt, - ae_int_t ncvt, - ae_bool* svdresult, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t opa, + /* Real */ ae_vector* x, + ae_int_t ix, + double beta, + /* Real */ ae_vector* y, + ae_int_t iy, ae_state *_state) { #ifndef ALGLIB_INTERCEPTS_MKL @@ -6673,31 +5776,27 @@ result = ae_false; return result; #else - return _ialglib_i_rmatrixbdsvdmkl(d, e, n, isupper, u, nru, c, ncc, vt, ncvt, svdresult); + return _ialglib_i_rmatrixgemvmkl(m, n, alpha, a, ia, ja, opa, x, ix, beta, y, iy); #endif } /************************************************************************* -MKL-based DHSEQR kernel. - -Returns True if MKL was present and handled request. - -WR and WI are pre-allocated arrays with length N. -Z is pre-allocated array[N,N]. 
+MKL-based kernel -- ALGLIB routine -- - 20.10.2014 + 12.10.2017 Bochkanov Sergey *************************************************************************/ -ae_bool rmatrixinternalschurdecompositionmkl(/* Real */ ae_matrix* h, - ae_int_t n, - ae_int_t tneeded, - ae_int_t zneeded, - /* Real */ ae_vector* wr, - /* Real */ ae_vector* wi, - /* Real */ ae_matrix* z, - ae_int_t* info, +ae_bool rmatrixtrsvmkl(ae_int_t n, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Real */ ae_vector* x, + ae_int_t ix, ae_state *_state) { #ifndef ALGLIB_INTERCEPTS_MKL @@ -6707,33 +5806,30 @@ result = ae_false; return result; #else - return _ialglib_i_rmatrixinternalschurdecompositionmkl(h, n, tneeded, zneeded, wr, wi, z, info); + return _ialglib_i_rmatrixtrsvmkl(n, a, ia, ja, isupper, isunit, optype, x, ix); #endif } /************************************************************************* -MKL-based DTREVC kernel. - -Returns True if MKL was present and handled request. - -NOTE: this function does NOT support HOWMNY=3!!!! - -VL and VR are pre-allocated arrays with length N*N, if required. If particalar -variables is not required, it can be dummy (empty) array. +MKL-based kernel -- ALGLIB routine -- - 20.10.2014 + 01.10.2013 Bochkanov Sergey *************************************************************************/ -ae_bool rmatrixinternaltrevcmkl(/* Real */ ae_matrix* t, - ae_int_t n, - ae_int_t side, - ae_int_t howmny, - /* Real */ ae_matrix* vl, - /* Real */ ae_matrix* vr, - ae_int_t* m, - ae_int_t* info, +ae_bool rmatrixsyrkmkl(ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_bool isupper, ae_state *_state) { #ifndef ALGLIB_INTERCEPTS_MKL @@ -6743,34 +5839,67 @@ result = ae_false; return result; #else - return _ialglib_i_rmatrixinternaltrevcmkl(t, n, side, howmny, vl, vr, m, info); + return _ialglib_i_rmatrixsyrkmkl(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); #endif } /************************************************************************* -MKL-based kernel. +MKL-based kernel -Returns True if MKL was present and handled request (MKL completion code -is returned as separate output parameter). + -- ALGLIB routine -- + 01.10.2013 + Bochkanov Sergey +*************************************************************************/ +ae_bool cmatrixherkmkl(ae_int_t n, + ae_int_t k, + double alpha, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + double beta, + /* Complex */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_bool isupper, + ae_state *_state) +{ +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; -D and E are pre-allocated arrays with length N (both of them!). On output, -D constraints eigenvalues, and E is destroyed. -Z is preallocated array[N,N] for ZNeeded<>0; ignored for ZNeeded=0. + result = ae_false; + return result; +#else + return _ialglib_i_cmatrixherkmkl(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); +#endif +} -EVDResult is modified if and only if MKL is present. 
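Every *mkl wrapper moved or added in this block follows the same dispatch convention: when ALGLIB_INTERCEPTS_MKL is not defined the function simply returns ae_false, and when it is defined the call is forwarded to the corresponding _ialglib_i_* hook, so an ae_false result always means "not handled here, use the generic kernel". A schematic of the caller side, assuming the ALGLIB internal types (ae_matrix, ae_int_t, ae_bool, ae_state) are in scope and using a made-up rmatrixsyrk_generic() as the portable fallback:

    /* Hypothetical caller-side sketch of the MKL dispatch used by these
       wrappers: try the MKL kernel first; if it reports ae_false (MKL not
       compiled in, or the case is unsupported), run a generic C kernel.
       rmatrixsyrk_generic() is an illustrative stand-in, not an ALGLIB symbol. */
    static void rmatrixsyrk_dispatch(ae_int_t n, ae_int_t k, double alpha,
                                     ae_matrix *a, ae_int_t ia, ae_int_t ja,
                                     ae_int_t optypea, double beta,
                                     ae_matrix *c, ae_int_t ic, ae_int_t jc,
                                     ae_bool isupper, ae_state *_state)
    {
        if( rmatrixsyrkmkl(n, k, alpha, a, ia, ja, optypea, beta,
                           c, ic, jc, isupper, _state) )
        {
            return;                              /* MKL handled the request */
        }
        rmatrixsyrk_generic(n, k, alpha, a, ia, ja, optypea, beta,
                            c, ic, jc, isupper, _state);   /* portable fallback */
    }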
+ +/************************************************************************* +MKL-based kernel -- ALGLIB routine -- - 20.10.2014 + 01.10.2013 Bochkanov Sergey *************************************************************************/ -ae_bool smatrixtdevdmkl(/* Real */ ae_vector* d, - /* Real */ ae_vector* e, +ae_bool rmatrixgemmmkl(ae_int_t m, ae_int_t n, - ae_int_t zneeded, - /* Real */ ae_matrix* z, - ae_bool* evdresult, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + ae_int_t optypeb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, ae_state *_state) { #ifndef ALGLIB_INTERCEPTS_MKL @@ -6780,6566 +5909,7757 @@ result = ae_false; return result; #else - return _ialglib_i_smatrixtdevdmkl(d, e, n, zneeded, z, evdresult); + return _ialglib_i_rmatrixgemmmkl(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); #endif } +/************************************************************************* +MKL-based kernel - -double vectornorm2(/* Real */ ae_vector* x, - ae_int_t i1, - ae_int_t i2, + -- ALGLIB routine -- + 01.10.2017 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixsymvmkl(ae_int_t n, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_bool isupper, + /* Real */ ae_vector* x, + ae_int_t ix, + double beta, + /* Real */ ae_vector* y, + ae_int_t iy, ae_state *_state) { - ae_int_t n; - ae_int_t ix; - double absxi; - double scl; - double ssq; - double result; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - n = i2-i1+1; - if( n<1 ) - { - result = (double)(0); - return result; - } - if( n==1 ) - { - result = ae_fabs(x->ptr.p_double[i1], _state); - return result; - } - scl = (double)(0); - ssq = (double)(1); - for(ix=i1; ix<=i2; ix++) - { - if( ae_fp_neq(x->ptr.p_double[ix],(double)(0)) ) - { - absxi = ae_fabs(x->ptr.p_double[ix], _state); - if( ae_fp_less(scl,absxi) ) - { - ssq = 1+ssq*ae_sqr(scl/absxi, _state); - scl = absxi; - } - else - { - ssq = ssq+ae_sqr(absxi/scl, _state); - } - } - } - result = scl*ae_sqrt(ssq, _state); + result = ae_false; return result; +#else + return _ialglib_i_rmatrixsymvmkl(n, alpha, a, ia, ja, isupper, x, ix, beta, y, iy); +#endif } -ae_int_t vectoridxabsmax(/* Real */ ae_vector* x, - ae_int_t i1, - ae_int_t i2, +/************************************************************************* +MKL-based kernel + + -- ALGLIB routine -- + 16.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool cmatrixgemmmkl(ae_int_t m, + ae_int_t n, + ae_int_t k, + ae_complex alpha, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + /* Complex */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + ae_int_t optypeb, + ae_complex beta, + /* Complex */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, ae_state *_state) { - ae_int_t i; - ae_int_t result; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - result = i1; - for(i=i1+1; i<=i2; i++) - { - if( ae_fp_greater(ae_fabs(x->ptr.p_double[i], _state),ae_fabs(x->ptr.p_double[result], _state)) ) - { - result = i; - } - } + result = ae_false; return result; +#else + return _ialglib_i_cmatrixgemmmkl(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); +#endif } -ae_int_t columnidxabsmax(/* Real */ ae_matrix* x, - ae_int_t i1, - ae_int_t i2, - ae_int_t j, - ae_state *_state) -{ - ae_int_t i; - 
ae_int_t result; - +/************************************************************************* +MKL-based kernel - result = i1; - for(i=i1+1; i<=i2; i++) - { - if( ae_fp_greater(ae_fabs(x->ptr.pp_double[i][j], _state),ae_fabs(x->ptr.pp_double[result][j], _state)) ) - { - result = i; - } - } - return result; -} - - -ae_int_t rowidxabsmax(/* Real */ ae_matrix* x, + -- ALGLIB routine -- + 16.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool cmatrixlefttrsmmkl(ae_int_t m, + ae_int_t n, + /* Complex */ ae_matrix* a, + ae_int_t i1, ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Complex */ ae_matrix* x, + ae_int_t i2, ae_int_t j2, - ae_int_t i, ae_state *_state) { - ae_int_t j; - ae_int_t result; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - result = j1; - for(j=j1+1; j<=j2; j++) - { - if( ae_fp_greater(ae_fabs(x->ptr.pp_double[i][j], _state),ae_fabs(x->ptr.pp_double[i][result], _state)) ) - { - result = j; - } - } + result = ae_false; return result; +#else + return _ialglib_i_cmatrixlefttrsmmkl(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); +#endif } -double upperhessenberg1norm(/* Real */ ae_matrix* a, +/************************************************************************* +MKL-based kernel + + -- ALGLIB routine -- + 16.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool cmatrixrighttrsmmkl(ae_int_t m, + ae_int_t n, + /* Complex */ ae_matrix* a, ae_int_t i1, - ae_int_t i2, ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Complex */ ae_matrix* x, + ae_int_t i2, ae_int_t j2, - /* Real */ ae_vector* work, ae_state *_state) { - ae_int_t i; - ae_int_t j; - double result; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - ae_assert(i2-i1==j2-j1, "UpperHessenberg1Norm: I2-I1<>J2-J1!", _state); - for(j=j1; j<=j2; j++) - { - work->ptr.p_double[j] = (double)(0); - } - for(i=i1; i<=i2; i++) - { - for(j=ae_maxint(j1, j1+i-i1-1, _state); j<=j2; j++) - { - work->ptr.p_double[j] = work->ptr.p_double[j]+ae_fabs(a->ptr.pp_double[i][j], _state); - } - } - result = (double)(0); - for(j=j1; j<=j2; j++) - { - result = ae_maxreal(result, work->ptr.p_double[j], _state); - } + result = ae_false; return result; +#else + return _ialglib_i_cmatrixrighttrsmmkl(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); +#endif } -void copymatrix(/* Real */ ae_matrix* a, - ae_int_t is1, - ae_int_t is2, - ae_int_t js1, - ae_int_t js2, - /* Real */ ae_matrix* b, - ae_int_t id1, - ae_int_t id2, - ae_int_t jd1, - ae_int_t jd2, +/************************************************************************* +MKL-based kernel + + -- ALGLIB routine -- + 16.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixlefttrsmmkl(ae_int_t m, + ae_int_t n, + /* Real */ ae_matrix* a, + ae_int_t i1, + ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Real */ ae_matrix* x, + ae_int_t i2, + ae_int_t j2, ae_state *_state) { - ae_int_t isrc; - ae_int_t idst; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - if( is1>is2||js1>js2 ) - { - return; - } - ae_assert(is2-is1==id2-id1, "CopyMatrix: different sizes!", _state); - ae_assert(js2-js1==jd2-jd1, "CopyMatrix: different sizes!", _state); - for(isrc=is1; isrc<=is2; isrc++) - { - idst = isrc-is1+id1; - ae_v_move(&b->ptr.pp_double[idst][jd1], 1, &a->ptr.pp_double[isrc][js1], 1, ae_v_len(jd1,jd2)); - } + result = ae_false; + return 
result; +#else + return _ialglib_i_rmatrixlefttrsmmkl(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); +#endif } -void inplacetranspose(/* Real */ ae_matrix* a, +/************************************************************************* +MKL-based kernel + + -- ALGLIB routine -- + 16.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixrighttrsmmkl(ae_int_t m, + ae_int_t n, + /* Real */ ae_matrix* a, ae_int_t i1, - ae_int_t i2, ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Real */ ae_matrix* x, + ae_int_t i2, ae_int_t j2, - /* Real */ ae_vector* work, ae_state *_state) { - ae_int_t i; - ae_int_t j; - ae_int_t ips; - ae_int_t jps; - ae_int_t l; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - if( i1>i2||j1>j2 ) - { - return; - } - ae_assert(i1-i2==j1-j2, "InplaceTranspose error: incorrect array size!", _state); - for(i=i1; i<=i2-1; i++) - { - j = j1+i-i1; - ips = i+1; - jps = j1+ips-i1; - l = i2-i; - ae_v_move(&work->ptr.p_double[1], 1, &a->ptr.pp_double[ips][j], a->stride, ae_v_len(1,l)); - ae_v_move(&a->ptr.pp_double[ips][j], a->stride, &a->ptr.pp_double[i][jps], 1, ae_v_len(ips,i2)); - ae_v_move(&a->ptr.pp_double[i][jps], 1, &work->ptr.p_double[1], 1, ae_v_len(jps,j2)); - } + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixrighttrsmmkl(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); +#endif } -void copyandtranspose(/* Real */ ae_matrix* a, - ae_int_t is1, - ae_int_t is2, - ae_int_t js1, - ae_int_t js2, - /* Real */ ae_matrix* b, - ae_int_t id1, - ae_int_t id2, - ae_int_t jd1, - ae_int_t jd2, +/************************************************************************* +MKL-based kernel. + +NOTE: + +if function returned False, CholResult is NOT modified. Not ever referenced! +if function returned True, CholResult is set to status of Cholesky decomposition +(True on succeess). + + -- ALGLIB routine -- + 16.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool spdmatrixcholeskymkl(/* Real */ ae_matrix* a, + ae_int_t offs, + ae_int_t n, + ae_bool isupper, + ae_bool* cholresult, ae_state *_state) { - ae_int_t isrc; - ae_int_t jdst; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - if( is1>is2||js1>js2 ) - { - return; - } - ae_assert(is2-is1==jd2-jd1, "CopyAndTranspose: different sizes!", _state); - ae_assert(js2-js1==id2-id1, "CopyAndTranspose: different sizes!", _state); - for(isrc=is1; isrc<=is2; isrc++) - { - jdst = isrc-is1+jd1; - ae_v_move(&b->ptr.pp_double[id1][jdst], b->stride, &a->ptr.pp_double[isrc][js1], 1, ae_v_len(id1,id2)); - } + result = ae_false; + return result; +#else + return _ialglib_i_spdmatrixcholeskymkl(a, offs, n, isupper, cholresult); +#endif } -void matrixvectormultiply(/* Real */ ae_matrix* a, - ae_int_t i1, - ae_int_t i2, - ae_int_t j1, - ae_int_t j2, - ae_bool trans, - /* Real */ ae_vector* x, - ae_int_t ix1, - ae_int_t ix2, - double alpha, - /* Real */ ae_vector* y, - ae_int_t iy1, - ae_int_t iy2, - double beta, +/************************************************************************* +MKL-based kernel. 
+ + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixplumkl(/* Real */ ae_matrix* a, + ae_int_t offs, + ae_int_t m, + ae_int_t n, + /* Integer */ ae_vector* pivots, ae_state *_state) { - ae_int_t i; - double v; - +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - if( !trans ) - { - - /* - * y := alpha*A*x + beta*y; - */ - if( i1>i2||j1>j2 ) - { - return; - } - ae_assert(j2-j1==ix2-ix1, "MatrixVectorMultiply: A and X dont match!", _state); - ae_assert(i2-i1==iy2-iy1, "MatrixVectorMultiply: A and Y dont match!", _state); - - /* - * beta*y - */ - if( ae_fp_eq(beta,(double)(0)) ) - { - for(i=iy1; i<=iy2; i++) - { - y->ptr.p_double[i] = (double)(0); - } - } - else - { - ae_v_muld(&y->ptr.p_double[iy1], 1, ae_v_len(iy1,iy2), beta); - } - - /* - * alpha*A*x - */ - for(i=i1; i<=i2; i++) - { - v = ae_v_dotproduct(&a->ptr.pp_double[i][j1], 1, &x->ptr.p_double[ix1], 1, ae_v_len(j1,j2)); - y->ptr.p_double[iy1+i-i1] = y->ptr.p_double[iy1+i-i1]+alpha*v; - } - } - else - { - - /* - * y := alpha*A'*x + beta*y; - */ - if( i1>i2||j1>j2 ) - { - return; - } - ae_assert(i2-i1==ix2-ix1, "MatrixVectorMultiply: A and X dont match!", _state); - ae_assert(j2-j1==iy2-iy1, "MatrixVectorMultiply: A and Y dont match!", _state); - - /* - * beta*y - */ - if( ae_fp_eq(beta,(double)(0)) ) - { - for(i=iy1; i<=iy2; i++) - { - y->ptr.p_double[i] = (double)(0); - } - } - else - { - ae_v_muld(&y->ptr.p_double[iy1], 1, ae_v_len(iy1,iy2), beta); - } - - /* - * alpha*A'*x - */ - for(i=i1; i<=i2; i++) - { - v = alpha*x->ptr.p_double[ix1+i-i1]; - ae_v_addd(&y->ptr.p_double[iy1], 1, &a->ptr.pp_double[i][j1], 1, ae_v_len(iy1,iy2), v); - } - } + + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixplumkl(a, offs, m, n, pivots); +#endif } -double pythag2(double x, double y, ae_state *_state) +/************************************************************************* +MKL-based kernel. + +NOTE: this function needs preallocated output/temporary arrays. + D and E must be at least max(M,N)-wide. + + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixbdmkl(/* Real */ ae_matrix* a, + ae_int_t m, + ae_int_t n, + /* Real */ ae_vector* d, + /* Real */ ae_vector* e, + /* Real */ ae_vector* tauq, + /* Real */ ae_vector* taup, + ae_state *_state) { - double w; - double xabs; - double yabs; - double z; - double result; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - xabs = ae_fabs(x, _state); - yabs = ae_fabs(y, _state); - w = ae_maxreal(xabs, yabs, _state); - z = ae_minreal(xabs, yabs, _state); - if( ae_fp_eq(z,(double)(0)) ) - { - result = w; - } - else - { - result = w*ae_sqrt(1+ae_sqr(z/w, _state), _state); - } + result = ae_false; return result; +#else + return _ialglib_i_rmatrixbdmkl(a, m, n, d, e, tauq, taup); +#endif } -void matrixmatrixmultiply(/* Real */ ae_matrix* a, - ae_int_t ai1, - ae_int_t ai2, - ae_int_t aj1, - ae_int_t aj2, - ae_bool transa, - /* Real */ ae_matrix* b, - ae_int_t bi1, - ae_int_t bi2, - ae_int_t bj1, - ae_int_t bj2, - ae_bool transb, - double alpha, - /* Real */ ae_matrix* c, - ae_int_t ci1, - ae_int_t ci2, - ae_int_t cj1, - ae_int_t cj2, - double beta, - /* Real */ ae_vector* work, +/************************************************************************* +MKL-based kernel. + +If ByQ is True, TauP is not used (can be empty array). +If ByQ is False, TauQ is not used (can be empty array). 
+ + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixbdmultiplybymkl(/* Real */ ae_matrix* qp, + ae_int_t m, + ae_int_t n, + /* Real */ ae_vector* tauq, + /* Real */ ae_vector* taup, + /* Real */ ae_matrix* z, + ae_int_t zrows, + ae_int_t zcolumns, + ae_bool byq, + ae_bool fromtheright, + ae_bool dotranspose, ae_state *_state) { - ae_int_t arows; - ae_int_t acols; - ae_int_t brows; - ae_int_t bcols; - ae_int_t crows; - ae_int_t i; - ae_int_t j; - ae_int_t k; - ae_int_t l; - ae_int_t r; - double v; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - - /* - * Setup - */ - if( !transa ) - { - arows = ai2-ai1+1; - acols = aj2-aj1+1; - } - else - { - arows = aj2-aj1+1; - acols = ai2-ai1+1; - } - if( !transb ) - { - brows = bi2-bi1+1; - bcols = bj2-bj1+1; - } - else - { - brows = bj2-bj1+1; - bcols = bi2-bi1+1; - } - ae_assert(acols==brows, "MatrixMatrixMultiply: incorrect matrix sizes!", _state); - if( ((arows<=0||acols<=0)||brows<=0)||bcols<=0 ) - { - return; - } - crows = arows; - - /* - * Test WORK - */ - i = ae_maxint(arows, acols, _state); - i = ae_maxint(brows, i, _state); - i = ae_maxint(i, bcols, _state); - work->ptr.p_double[1] = (double)(0); - work->ptr.p_double[i] = (double)(0); - - /* - * Prepare C - */ - if( ae_fp_eq(beta,(double)(0)) ) - { - for(i=ci1; i<=ci2; i++) - { - for(j=cj1; j<=cj2; j++) - { - c->ptr.pp_double[i][j] = (double)(0); - } - } - } - else - { - for(i=ci1; i<=ci2; i++) - { - ae_v_muld(&c->ptr.pp_double[i][cj1], 1, ae_v_len(cj1,cj2), beta); - } - } - - /* - * A*B - */ - if( !transa&&!transb ) - { - for(l=ai1; l<=ai2; l++) - { - for(r=bi1; r<=bi2; r++) - { - v = alpha*a->ptr.pp_double[l][aj1+r-bi1]; - k = ci1+l-ai1; - ae_v_addd(&c->ptr.pp_double[k][cj1], 1, &b->ptr.pp_double[r][bj1], 1, ae_v_len(cj1,cj2), v); - } - } - return; - } - - /* - * A*B' - */ - if( !transa&&transb ) - { - if( arows*acolsptr.pp_double[l][aj1], 1, &b->ptr.pp_double[r][bj1], 1, ae_v_len(aj1,aj2)); - c->ptr.pp_double[ci1+l-ai1][cj1+r-bi1] = c->ptr.pp_double[ci1+l-ai1][cj1+r-bi1]+alpha*v; - } - } - return; - } - else - { - for(l=ai1; l<=ai2; l++) - { - for(r=bi1; r<=bi2; r++) - { - v = ae_v_dotproduct(&a->ptr.pp_double[l][aj1], 1, &b->ptr.pp_double[r][bj1], 1, ae_v_len(aj1,aj2)); - c->ptr.pp_double[ci1+l-ai1][cj1+r-bi1] = c->ptr.pp_double[ci1+l-ai1][cj1+r-bi1]+alpha*v; - } - } - return; - } - } - - /* - * A'*B - */ - if( transa&&!transb ) - { - for(l=aj1; l<=aj2; l++) - { - for(r=bi1; r<=bi2; r++) - { - v = alpha*a->ptr.pp_double[ai1+r-bi1][l]; - k = ci1+l-aj1; - ae_v_addd(&c->ptr.pp_double[k][cj1], 1, &b->ptr.pp_double[r][bj1], 1, ae_v_len(cj1,cj2), v); - } - } - return; - } - - /* - * A'*B' - */ - if( transa&&transb ) - { - if( arows*acolsptr.p_double[i] = 0.0; - } - for(l=ai1; l<=ai2; l++) - { - v = alpha*b->ptr.pp_double[r][bj1+l-ai1]; - ae_v_addd(&work->ptr.p_double[1], 1, &a->ptr.pp_double[l][aj1], 1, ae_v_len(1,crows), v); - } - ae_v_add(&c->ptr.pp_double[ci1][k], c->stride, &work->ptr.p_double[1], 1, ae_v_len(ci1,ci2)); - } - return; - } - else - { - for(l=aj1; l<=aj2; l++) - { - k = ai2-ai1+1; - ae_v_move(&work->ptr.p_double[1], 1, &a->ptr.pp_double[ai1][l], a->stride, ae_v_len(1,k)); - for(r=bi1; r<=bi2; r++) - { - v = ae_v_dotproduct(&work->ptr.p_double[1], 1, &b->ptr.pp_double[r][bj1], 1, ae_v_len(1,k)); - c->ptr.pp_double[ci1+l-aj1][cj1+r-bi1] = c->ptr.pp_double[ci1+l-aj1][cj1+r-bi1]+alpha*v; - } - } - return; - } - } + result = ae_false; + return result; +#else + return 
_ialglib_i_rmatrixbdmultiplybymkl(qp, m, n, tauq, taup, z, zrows, zcolumns, byq, fromtheright, dotranspose); +#endif } +/************************************************************************* +MKL-based kernel. +NOTE: Tau must be preallocated array with at least N-1 elements. -void hermitianmatrixvectormultiply(/* Complex */ ae_matrix* a, - ae_bool isupper, - ae_int_t i1, - ae_int_t i2, - /* Complex */ ae_vector* x, - ae_complex alpha, - /* Complex */ ae_vector* y, + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixhessenbergmkl(/* Real */ ae_matrix* a, + ae_int_t n, + /* Real */ ae_vector* tau, ae_state *_state) { - ae_int_t i; - ae_int_t ba1; - ae_int_t by1; - ae_int_t by2; - ae_int_t bx1; - ae_int_t bx2; - ae_int_t n; - ae_complex v; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - n = i2-i1+1; - if( n<=0 ) - { - return; - } - - /* - * Let A = L + D + U, where - * L is strictly lower triangular (main diagonal is zero) - * D is diagonal - * U is strictly upper triangular (main diagonal is zero) - * - * A*x = L*x + D*x + U*x - * - * Calculate D*x first - */ - for(i=i1; i<=i2; i++) - { - y->ptr.p_complex[i-i1+1] = ae_c_mul(a->ptr.pp_complex[i][i],x->ptr.p_complex[i-i1+1]); - } - - /* - * Add L*x + U*x - */ - if( isupper ) - { - for(i=i1; i<=i2-1; i++) - { - - /* - * Add L*x to the result - */ - v = x->ptr.p_complex[i-i1+1]; - by1 = i-i1+2; - by2 = n; - ba1 = i+1; - ae_v_caddc(&y->ptr.p_complex[by1], 1, &a->ptr.pp_complex[i][ba1], 1, "Conj", ae_v_len(by1,by2), v); - - /* - * Add U*x to the result - */ - bx1 = i-i1+2; - bx2 = n; - ba1 = i+1; - v = ae_v_cdotproduct(&x->ptr.p_complex[bx1], 1, "N", &a->ptr.pp_complex[i][ba1], 1, "N", ae_v_len(bx1,bx2)); - y->ptr.p_complex[i-i1+1] = ae_c_add(y->ptr.p_complex[i-i1+1],v); - } - } - else - { - for(i=i1+1; i<=i2; i++) - { - - /* - * Add L*x to the result - */ - bx1 = 1; - bx2 = i-i1; - ba1 = i1; - v = ae_v_cdotproduct(&x->ptr.p_complex[bx1], 1, "N", &a->ptr.pp_complex[i][ba1], 1, "N", ae_v_len(bx1,bx2)); - y->ptr.p_complex[i-i1+1] = ae_c_add(y->ptr.p_complex[i-i1+1],v); - - /* - * Add U*x to the result - */ - v = x->ptr.p_complex[i-i1+1]; - by1 = 1; - by2 = i-i1; - ba1 = i1; - ae_v_caddc(&y->ptr.p_complex[by1], 1, &a->ptr.pp_complex[i][ba1], 1, "Conj", ae_v_len(by1,by2), v); - } - } - ae_v_cmulc(&y->ptr.p_complex[1], 1, ae_v_len(1,n), alpha); + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixhessenbergmkl(a, n, tau); +#endif } -void hermitianrank2update(/* Complex */ ae_matrix* a, - ae_bool isupper, - ae_int_t i1, - ae_int_t i2, - /* Complex */ ae_vector* x, - /* Complex */ ae_vector* y, - /* Complex */ ae_vector* t, - ae_complex alpha, +/************************************************************************* +MKL-based kernel. 
+ +NOTE: Q must be preallocated N*N array + + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixhessenbergunpackqmkl(/* Real */ ae_matrix* a, + ae_int_t n, + /* Real */ ae_vector* tau, + /* Real */ ae_matrix* q, ae_state *_state) { - ae_int_t i; - ae_int_t tp1; - ae_int_t tp2; - ae_complex v; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - if( isupper ) - { - for(i=i1; i<=i2; i++) - { - tp1 = i+1-i1; - tp2 = i2-i1+1; - v = ae_c_mul(alpha,x->ptr.p_complex[i+1-i1]); - ae_v_cmovec(&t->ptr.p_complex[tp1], 1, &y->ptr.p_complex[tp1], 1, "Conj", ae_v_len(tp1,tp2), v); - v = ae_c_mul(ae_c_conj(alpha, _state),y->ptr.p_complex[i+1-i1]); - ae_v_caddc(&t->ptr.p_complex[tp1], 1, &x->ptr.p_complex[tp1], 1, "Conj", ae_v_len(tp1,tp2), v); - ae_v_cadd(&a->ptr.pp_complex[i][i], 1, &t->ptr.p_complex[tp1], 1, "N", ae_v_len(i,i2)); - } - } - else - { - for(i=i1; i<=i2; i++) - { - tp1 = 1; - tp2 = i+1-i1; - v = ae_c_mul(alpha,x->ptr.p_complex[i+1-i1]); - ae_v_cmovec(&t->ptr.p_complex[tp1], 1, &y->ptr.p_complex[tp1], 1, "Conj", ae_v_len(tp1,tp2), v); - v = ae_c_mul(ae_c_conj(alpha, _state),y->ptr.p_complex[i+1-i1]); - ae_v_caddc(&t->ptr.p_complex[tp1], 1, &x->ptr.p_complex[tp1], 1, "Conj", ae_v_len(tp1,tp2), v); - ae_v_cadd(&a->ptr.pp_complex[i][i1], 1, &t->ptr.p_complex[tp1], 1, "N", ae_v_len(i1,i)); - } - } + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixhessenbergunpackqmkl(a, n, tau, q); +#endif } - - /************************************************************************* -Generation of an elementary reflection transformation - -The subroutine generates elementary reflection H of order N, so that, for -a given X, the following equality holds true: +MKL-based kernel. - ( X(1) ) ( Beta ) -H * ( .. ) = ( 0 ) - ( X(n) ) ( 0 ) +NOTE: Tau, D, E must be preallocated arrays; + length(E)=length(Tau)=N-1 (or larger) + length(D)=N (or larger) -where - ( V(1) ) -H = 1 - Tau * ( .. ) * ( V(1), ..., V(n) ) - ( V(n) ) + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool smatrixtdmkl(/* Real */ ae_matrix* a, + ae_int_t n, + ae_bool isupper, + /* Real */ ae_vector* tau, + /* Real */ ae_vector* d, + /* Real */ ae_vector* e, + ae_state *_state) +{ +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; -where the first component of vector V equals 1. -Input parameters: - X - vector. Array whose index ranges within [1..N]. - N - reflection order. + result = ae_false; + return result; +#else + return _ialglib_i_smatrixtdmkl(a, n, isupper, tau, d, e); +#endif +} -Output parameters: - X - components from 2 to N are replaced with vector V. - The first component is replaced with parameter Beta. - Tau - scalar value Tau. If X is a null vector, Tau equals 0, - otherwise 1 <= Tau <= 2. -This subroutine is the modification of the DLARFG subroutines from -the LAPACK library. +/************************************************************************* +MKL-based kernel. -MODIFICATIONS: - 24.12.2005 sign(Alpha) was replaced with an analogous to the Fortran SIGN code. +NOTE: Q must be preallocated N*N array - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey *************************************************************************/ -void generatereflection(/* Real */ ae_vector* x, +ae_bool smatrixtdunpackqmkl(/* Real */ ae_matrix* a, ae_int_t n, - double* tau, + ae_bool isupper, + /* Real */ ae_vector* tau, + /* Real */ ae_matrix* q, ae_state *_state) { - ae_int_t j; - double alpha; - double xnorm; - double v; - double beta; - double mx; - double s; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - *tau = 0; - if( n<=1 ) - { - *tau = (double)(0); - return; - } - - /* - * Scale if needed (to avoid overflow/underflow during intermediate - * calculations). - */ - mx = (double)(0); - for(j=1; j<=n; j++) - { - mx = ae_maxreal(ae_fabs(x->ptr.p_double[j], _state), mx, _state); - } - s = (double)(1); - if( ae_fp_neq(mx,(double)(0)) ) - { - if( ae_fp_less_eq(mx,ae_minrealnumber/ae_machineepsilon) ) - { - s = ae_minrealnumber/ae_machineepsilon; - v = 1/s; - ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), v); - mx = mx*v; - } - else - { - if( ae_fp_greater_eq(mx,ae_maxrealnumber*ae_machineepsilon) ) - { - s = ae_maxrealnumber*ae_machineepsilon; - v = 1/s; - ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), v); - mx = mx*v; - } - } - } - - /* - * XNORM = DNRM2( N-1, X, INCX ) - */ - alpha = x->ptr.p_double[1]; - xnorm = (double)(0); - if( ae_fp_neq(mx,(double)(0)) ) - { - for(j=2; j<=n; j++) - { - xnorm = xnorm+ae_sqr(x->ptr.p_double[j]/mx, _state); - } - xnorm = ae_sqrt(xnorm, _state)*mx; - } - if( ae_fp_eq(xnorm,(double)(0)) ) - { - - /* - * H = I - */ - *tau = (double)(0); - x->ptr.p_double[1] = x->ptr.p_double[1]*s; - return; - } - - /* - * general case - */ - mx = ae_maxreal(ae_fabs(alpha, _state), ae_fabs(xnorm, _state), _state); - beta = -mx*ae_sqrt(ae_sqr(alpha/mx, _state)+ae_sqr(xnorm/mx, _state), _state); - if( ae_fp_less(alpha,(double)(0)) ) - { - beta = -beta; - } - *tau = (beta-alpha)/beta; - v = 1/(alpha-beta); - ae_v_muld(&x->ptr.p_double[2], 1, ae_v_len(2,n), v); - x->ptr.p_double[1] = beta; - - /* - * Scale back outputs - */ - x->ptr.p_double[1] = x->ptr.p_double[1]*s; + result = ae_false; + return result; +#else + return _ialglib_i_smatrixtdunpackqmkl(a, n, isupper, tau, q); +#endif } /************************************************************************* -Application of an elementary reflection to a rectangular matrix of size MxN - -The algorithm pre-multiplies the matrix by an elementary reflection transformation -which is given by column V and scalar Tau (see the description of the -GenerateReflection procedure). Not the whole matrix but only a part of it -is transformed (rows from M1 to M2, columns from N1 to N2). Only the elements -of this submatrix are changed. - -Input parameters: - C - matrix to be transformed. - Tau - scalar defining the transformation. - V - column defining the transformation. - Array whose index ranges within [1..M2-M1+1]. - M1, M2 - range of rows to be transformed. - N1, N2 - range of columns to be transformed. - WORK - working array whose indexes goes from N1 to N2. +MKL-based kernel. -Output parameters: - C - the result of multiplying the input matrix C by the - transformation matrix which is given by Tau and V. - If N1>N2 or M1>M2, C is not modified. +NOTE: Tau, D, E must be preallocated arrays; + length(E)=length(Tau)=N-1 (or larger) + length(D)=N (or larger) - -- LAPACK auxiliary routine (version 3.0) -- - Univ. 
of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey *************************************************************************/ -void applyreflectionfromtheleft(/* Real */ ae_matrix* c, - double tau, - /* Real */ ae_vector* v, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Real */ ae_vector* work, +ae_bool hmatrixtdmkl(/* Complex */ ae_matrix* a, + ae_int_t n, + ae_bool isupper, + /* Complex */ ae_vector* tau, + /* Real */ ae_vector* d, + /* Real */ ae_vector* e, ae_state *_state) { - double t; - ae_int_t i; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - if( (ae_fp_eq(tau,(double)(0))||n1>n2)||m1>m2 ) - { - return; - } - - /* - * w := C' * v - */ - for(i=n1; i<=n2; i++) - { - work->ptr.p_double[i] = (double)(0); - } - for(i=m1; i<=m2; i++) - { - t = v->ptr.p_double[i+1-m1]; - ae_v_addd(&work->ptr.p_double[n1], 1, &c->ptr.pp_double[i][n1], 1, ae_v_len(n1,n2), t); - } - - /* - * C := C - tau * v * w' - */ - for(i=m1; i<=m2; i++) - { - t = v->ptr.p_double[i-m1+1]*tau; - ae_v_subd(&c->ptr.pp_double[i][n1], 1, &work->ptr.p_double[n1], 1, ae_v_len(n1,n2), t); - } + result = ae_false; + return result; +#else + return _ialglib_i_hmatrixtdmkl(a, n, isupper, tau, d, e); +#endif } /************************************************************************* -Application of an elementary reflection to a rectangular matrix of size MxN - -The algorithm post-multiplies the matrix by an elementary reflection transformation -which is given by column V and scalar Tau (see the description of the -GenerateReflection procedure). Not the whole matrix but only a part of it -is transformed (rows from M1 to M2, columns from N1 to N2). Only the -elements of this submatrix are changed. - -Input parameters: - C - matrix to be transformed. - Tau - scalar defining the transformation. - V - column defining the transformation. - Array whose index ranges within [1..N2-N1+1]. - M1, M2 - range of rows to be transformed. - N1, N2 - range of columns to be transformed. - WORK - working array whose indexes goes from M1 to M2. +MKL-based kernel. -Output parameters: - C - the result of multiplying the input matrix C by the - transformation matrix which is given by Tau and V. - If N1>N2 or M1>M2, C is not modified. +NOTE: Q must be preallocated N*N array - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey *************************************************************************/ -void applyreflectionfromtheright(/* Real */ ae_matrix* c, - double tau, - /* Real */ ae_vector* v, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Real */ ae_vector* work, +ae_bool hmatrixtdunpackqmkl(/* Complex */ ae_matrix* a, + ae_int_t n, + ae_bool isupper, + /* Complex */ ae_vector* tau, + /* Complex */ ae_matrix* q, ae_state *_state) { - double t; - ae_int_t i; - ae_int_t vm; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - if( (ae_fp_eq(tau,(double)(0))||n1>n2)||m1>m2 ) - { - return; - } - vm = n2-n1+1; - for(i=m1; i<=m2; i++) - { - t = ae_v_dotproduct(&c->ptr.pp_double[i][n1], 1, &v->ptr.p_double[1], 1, ae_v_len(n1,n2)); - t = t*tau; - ae_v_subd(&c->ptr.pp_double[i][n1], 1, &v->ptr.p_double[1], 1, ae_v_len(n1,n2), t); - } - - /* - * This line is necessary to avoid spurious compiler warnings - */ - touchint(&vm, _state); + result = ae_false; + return result; +#else + return _ialglib_i_hmatrixtdunpackqmkl(a, n, isupper, tau, q); +#endif } +/************************************************************************* +MKL-based kernel. +Returns True if MKL was present and handled request (MKL completion code +is returned as separate output parameter). -/************************************************************************* -Generation of an elementary complex reflection transformation +D and E are pre-allocated arrays with length N (both of them!). On output, +D constraints singular values, and E is destroyed. -The subroutine generates elementary complex reflection H of order N, so -that, for a given X, the following equality holds true: +SVDResult is modified if and only if MKL is present. - ( X(1) ) ( Beta ) -H' * ( .. ) = ( 0 ), H'*H = I, Beta is a real number - ( X(n) ) ( 0 ) + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixbdsvdmkl(/* Real */ ae_vector* d, + /* Real */ ae_vector* e, + ae_int_t n, + ae_bool isupper, + /* Real */ ae_matrix* u, + ae_int_t nru, + /* Real */ ae_matrix* c, + ae_int_t ncc, + /* Real */ ae_matrix* vt, + ae_int_t ncvt, + ae_bool* svdresult, + ae_state *_state) +{ +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; -where - ( V(1) ) -H = 1 - Tau * ( .. ) * ( conj(V(1)), ..., conj(V(n)) ) - ( V(n) ) + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixbdsvdmkl(d, e, n, isupper, u, nru, c, ncc, vt, ncvt, svdresult); +#endif +} -where the first component of vector V equals 1. -Input parameters: - X - vector. Array with elements [1..N]. - N - reflection order. +/************************************************************************* +MKL-based DHSEQR kernel. -Output parameters: - X - components from 2 to N are replaced by vector V. - The first component is replaced with parameter Beta. - Tau - scalar value Tau. +Returns True if MKL was present and handled request. -This subroutine is the modification of CLARFG subroutines from the LAPACK -library. It has similar functionality except for the fact that it doesn’t -handle errors when intermediate results cause an overflow. +WR and WI are pre-allocated arrays with length N. +Z is pre-allocated array[N,N]. - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey *************************************************************************/ -void complexgeneratereflection(/* Complex */ ae_vector* x, +ae_bool rmatrixinternalschurdecompositionmkl(/* Real */ ae_matrix* h, ae_int_t n, - ae_complex* tau, + ae_int_t tneeded, + ae_int_t zneeded, + /* Real */ ae_vector* wr, + /* Real */ ae_vector* wi, + /* Real */ ae_matrix* z, + ae_int_t* info, ae_state *_state) { - ae_int_t j; - ae_complex alpha; - double alphi; - double alphr; - double beta; - double xnorm; - double mx; - ae_complex t; - double s; - ae_complex v; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - tau->x = 0; - tau->y = 0; - if( n<=0 ) - { - *tau = ae_complex_from_i(0); - return; - } - - /* - * Scale if needed (to avoid overflow/underflow during intermediate - * calculations). - */ - mx = (double)(0); - for(j=1; j<=n; j++) - { - mx = ae_maxreal(ae_c_abs(x->ptr.p_complex[j], _state), mx, _state); - } - s = (double)(1); - if( ae_fp_neq(mx,(double)(0)) ) - { - if( ae_fp_less(mx,(double)(1)) ) - { - s = ae_sqrt(ae_minrealnumber, _state); - v = ae_complex_from_d(1/s); - ae_v_cmulc(&x->ptr.p_complex[1], 1, ae_v_len(1,n), v); - } - else - { - s = ae_sqrt(ae_maxrealnumber, _state); - v = ae_complex_from_d(1/s); - ae_v_cmulc(&x->ptr.p_complex[1], 1, ae_v_len(1,n), v); - } - } - - /* - * calculate - */ - alpha = x->ptr.p_complex[1]; - mx = (double)(0); - for(j=2; j<=n; j++) - { - mx = ae_maxreal(ae_c_abs(x->ptr.p_complex[j], _state), mx, _state); - } - xnorm = (double)(0); - if( ae_fp_neq(mx,(double)(0)) ) - { - for(j=2; j<=n; j++) - { - t = ae_c_div_d(x->ptr.p_complex[j],mx); - xnorm = xnorm+ae_c_mul(t,ae_c_conj(t, _state)).x; - } - xnorm = ae_sqrt(xnorm, _state)*mx; - } - alphr = alpha.x; - alphi = alpha.y; - if( ae_fp_eq(xnorm,(double)(0))&&ae_fp_eq(alphi,(double)(0)) ) - { - *tau = ae_complex_from_i(0); - x->ptr.p_complex[1] = ae_c_mul_d(x->ptr.p_complex[1],s); - return; - } - mx = ae_maxreal(ae_fabs(alphr, _state), ae_fabs(alphi, _state), _state); - mx = ae_maxreal(mx, ae_fabs(xnorm, _state), _state); - beta = -mx*ae_sqrt(ae_sqr(alphr/mx, _state)+ae_sqr(alphi/mx, _state)+ae_sqr(xnorm/mx, _state), _state); - if( ae_fp_less(alphr,(double)(0)) ) - { - beta = -beta; - } - tau->x = (beta-alphr)/beta; - tau->y = -alphi/beta; - alpha = ae_c_d_div(1,ae_c_sub_d(alpha,beta)); - if( n>1 ) - { - ae_v_cmulc(&x->ptr.p_complex[2], 1, ae_v_len(2,n), alpha); - } - alpha = ae_complex_from_d(beta); - x->ptr.p_complex[1] = alpha; - - /* - * Scale back - */ - x->ptr.p_complex[1] = ae_c_mul_d(x->ptr.p_complex[1],s); + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixinternalschurdecompositionmkl(h, n, tneeded, zneeded, wr, wi, z, info); +#endif } /************************************************************************* -Application of an elementary reflection to a rectangular matrix of size MxN - -The algorithm pre-multiplies the matrix by an elementary reflection -transformation which is given by column V and scalar Tau (see the -description of the GenerateReflection). Not the whole matrix but only a -part of it is transformed (rows from M1 to M2, columns from N1 to N2). Only -the elements of this submatrix are changed. +MKL-based DTREVC kernel. -Note: the matrix is multiplied by H, not by H'. If it is required to -multiply the matrix by H', it is necessary to pass Conj(Tau) instead of Tau. 
+Returns True if MKL was present and handled request. -Input parameters: - C - matrix to be transformed. - Tau - scalar defining transformation. - V - column defining transformation. - Array whose index ranges within [1..M2-M1+1] - M1, M2 - range of rows to be transformed. - N1, N2 - range of columns to be transformed. - WORK - working array whose index goes from N1 to N2. +NOTE: this function does NOT support HOWMNY=3!!!! -Output parameters: - C - the result of multiplying the input matrix C by the - transformation matrix which is given by Tau and V. - If N1>N2 or M1>M2, C is not modified. +VL and VR are pre-allocated arrays with length N*N, if required. If particalar +variables is not required, it can be dummy (empty) array. - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey *************************************************************************/ -void complexapplyreflectionfromtheleft(/* Complex */ ae_matrix* c, - ae_complex tau, - /* Complex */ ae_vector* v, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Complex */ ae_vector* work, +ae_bool rmatrixinternaltrevcmkl(/* Real */ ae_matrix* t, + ae_int_t n, + ae_int_t side, + ae_int_t howmny, + /* Real */ ae_matrix* vl, + /* Real */ ae_matrix* vr, + ae_int_t* m, + ae_int_t* info, ae_state *_state) { - ae_complex t; - ae_int_t i; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - if( (ae_c_eq_d(tau,(double)(0))||n1>n2)||m1>m2 ) - { - return; - } - - /* - * w := C^T * conj(v) - */ - for(i=n1; i<=n2; i++) - { - work->ptr.p_complex[i] = ae_complex_from_i(0); - } - for(i=m1; i<=m2; i++) - { - t = ae_c_conj(v->ptr.p_complex[i+1-m1], _state); - ae_v_caddc(&work->ptr.p_complex[n1], 1, &c->ptr.pp_complex[i][n1], 1, "N", ae_v_len(n1,n2), t); - } - - /* - * C := C - tau * v * w^T - */ - for(i=m1; i<=m2; i++) - { - t = ae_c_mul(v->ptr.p_complex[i-m1+1],tau); - ae_v_csubc(&c->ptr.pp_complex[i][n1], 1, &work->ptr.p_complex[n1], 1, "N", ae_v_len(n1,n2), t); - } + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixinternaltrevcmkl(t, n, side, howmny, vl, vr, m, info); +#endif } /************************************************************************* -Application of an elementary reflection to a rectangular matrix of size MxN +MKL-based kernel. -The algorithm post-multiplies the matrix by an elementary reflection -transformation which is given by column V and scalar Tau (see the -description of the GenerateReflection). Not the whole matrix but only a -part of it is transformed (rows from M1 to M2, columns from N1 to N2). -Only the elements of this submatrix are changed. +Returns True if MKL was present and handled request (MKL completion code +is returned as separate output parameter). -Input parameters: - C - matrix to be transformed. - Tau - scalar defining transformation. - V - column defining transformation. - Array whose index ranges within [1..N2-N1+1] - M1, M2 - range of rows to be transformed. - N1, N2 - range of columns to be transformed. - WORK - working array whose index goes from M1 to M2. +D and E are pre-allocated arrays with length N (both of them!). On output, +D constraints eigenvalues, and E is destroyed. -Output parameters: - C - the result of multiplying the input matrix C by the - transformation matrix which is given by Tau and V. - If N1>N2 or M1>M2, C is not modified. 
+Z is preallocated array[N,N] for ZNeeded<>0; ignored for ZNeeded=0. - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - September 30, 1994 +EVDResult is modified if and only if MKL is present. + + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey *************************************************************************/ -void complexapplyreflectionfromtheright(/* Complex */ ae_matrix* c, - ae_complex tau, - /* Complex */ ae_vector* v, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Complex */ ae_vector* work, +ae_bool smatrixtdevdmkl(/* Real */ ae_vector* d, + /* Real */ ae_vector* e, + ae_int_t n, + ae_int_t zneeded, + /* Real */ ae_matrix* z, + ae_bool* evdresult, ae_state *_state) { - ae_complex t; - ae_int_t i; - ae_int_t vm; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - if( (ae_c_eq_d(tau,(double)(0))||n1>n2)||m1>m2 ) - { - return; - } - - /* - * w := C * v - */ - vm = n2-n1+1; - for(i=m1; i<=m2; i++) - { - t = ae_v_cdotproduct(&c->ptr.pp_complex[i][n1], 1, "N", &v->ptr.p_complex[1], 1, "N", ae_v_len(n1,n2)); - work->ptr.p_complex[i] = t; - } - - /* - * C := C - w * conj(v^T) - */ - ae_v_cmove(&v->ptr.p_complex[1], 1, &v->ptr.p_complex[1], 1, "Conj", ae_v_len(1,vm)); - for(i=m1; i<=m2; i++) - { - t = ae_c_mul(work->ptr.p_complex[i],tau); - ae_v_csubc(&c->ptr.pp_complex[i][n1], 1, &v->ptr.p_complex[1], 1, "N", ae_v_len(n1,n2), t); - } - ae_v_cmove(&v->ptr.p_complex[1], 1, &v->ptr.p_complex[1], 1, "Conj", ae_v_len(1,vm)); + result = ae_false; + return result; +#else + return _ialglib_i_smatrixtdevdmkl(d, e, n, zneeded, z, evdresult); +#endif } +/************************************************************************* +MKL-based kernel. +Returns True if MKL was present and handled request (MKL completion code +is returned as separate output parameter). -void symmetricmatrixvectormultiply(/* Real */ ae_matrix* a, - ae_bool isupper, - ae_int_t i1, - ae_int_t i2, - /* Real */ ae_vector* x, - double alpha, - /* Real */ ae_vector* y, - ae_state *_state) -{ - ae_int_t i; - ae_int_t ba1; - ae_int_t ba2; - ae_int_t by1; - ae_int_t by2; - ae_int_t bx1; - ae_int_t bx2; - ae_int_t n; - double v; - +D and E are pre-allocated arrays with length N (both of them!). On output, +D constraints eigenvalues, and E is destroyed. 
- n = i2-i1+1; - if( n<=0 ) - { - return; - } - - /* - * Let A = L + D + U, where - * L is strictly lower triangular (main diagonal is zero) - * D is diagonal - * U is strictly upper triangular (main diagonal is zero) - * - * A*x = L*x + D*x + U*x - * - * Calculate D*x first - */ - for(i=i1; i<=i2; i++) - { - y->ptr.p_double[i-i1+1] = a->ptr.pp_double[i][i]*x->ptr.p_double[i-i1+1]; - } - - /* - * Add L*x + U*x - */ - if( isupper ) - { - for(i=i1; i<=i2-1; i++) - { - - /* - * Add L*x to the result - */ - v = x->ptr.p_double[i-i1+1]; - by1 = i-i1+2; - by2 = n; - ba1 = i+1; - ba2 = i2; - ae_v_addd(&y->ptr.p_double[by1], 1, &a->ptr.pp_double[i][ba1], 1, ae_v_len(by1,by2), v); - - /* - * Add U*x to the result - */ - bx1 = i-i1+2; - bx2 = n; - ba1 = i+1; - ba2 = i2; - v = ae_v_dotproduct(&x->ptr.p_double[bx1], 1, &a->ptr.pp_double[i][ba1], 1, ae_v_len(bx1,bx2)); - y->ptr.p_double[i-i1+1] = y->ptr.p_double[i-i1+1]+v; - } - } - else - { - for(i=i1+1; i<=i2; i++) - { - - /* - * Add L*x to the result - */ - bx1 = 1; - bx2 = i-i1; - ba1 = i1; - ba2 = i-1; - v = ae_v_dotproduct(&x->ptr.p_double[bx1], 1, &a->ptr.pp_double[i][ba1], 1, ae_v_len(bx1,bx2)); - y->ptr.p_double[i-i1+1] = y->ptr.p_double[i-i1+1]+v; - - /* - * Add U*x to the result - */ - v = x->ptr.p_double[i-i1+1]; - by1 = 1; - by2 = i-i1; - ba1 = i1; - ba2 = i-1; - ae_v_addd(&y->ptr.p_double[by1], 1, &a->ptr.pp_double[i][ba1], 1, ae_v_len(by1,by2), v); - } - } - ae_v_muld(&y->ptr.p_double[1], 1, ae_v_len(1,n), alpha); - touchint(&ba2, _state); -} +Z is preallocated array[N,N] for ZNeeded<>0; ignored for ZNeeded=0. +EVDResult is modified if and only if MKL is present. -void symmetricrank2update(/* Real */ ae_matrix* a, - ae_bool isupper, - ae_int_t i1, - ae_int_t i2, + -- ALGLIB routine -- + 20.10.2014 + Bochkanov Sergey +*************************************************************************/ +ae_bool sparsegemvcrsmkl(ae_int_t opa, + ae_int_t arows, + ae_int_t acols, + double alpha, + /* Real */ ae_vector* vals, + /* Integer */ ae_vector* cidx, + /* Integer */ ae_vector* ridx, /* Real */ ae_vector* x, + ae_int_t ix, + double beta, /* Real */ ae_vector* y, - /* Real */ ae_vector* t, - double alpha, + ae_int_t iy, ae_state *_state) { - ae_int_t i; - ae_int_t tp1; - ae_int_t tp2; - double v; +#ifndef ALGLIB_INTERCEPTS_MKL + ae_bool result; - if( isupper ) - { - for(i=i1; i<=i2; i++) - { - tp1 = i+1-i1; - tp2 = i2-i1+1; - v = x->ptr.p_double[i+1-i1]; - ae_v_moved(&t->ptr.p_double[tp1], 1, &y->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), v); - v = y->ptr.p_double[i+1-i1]; - ae_v_addd(&t->ptr.p_double[tp1], 1, &x->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), v); - ae_v_muld(&t->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), alpha); - ae_v_add(&a->ptr.pp_double[i][i], 1, &t->ptr.p_double[tp1], 1, ae_v_len(i,i2)); - } - } - else - { - for(i=i1; i<=i2; i++) - { - tp1 = 1; - tp2 = i+1-i1; - v = x->ptr.p_double[i+1-i1]; - ae_v_moved(&t->ptr.p_double[tp1], 1, &y->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), v); - v = y->ptr.p_double[i+1-i1]; - ae_v_addd(&t->ptr.p_double[tp1], 1, &x->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), v); - ae_v_muld(&t->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), alpha); - ae_v_add(&a->ptr.pp_double[i][i1], 1, &t->ptr.p_double[tp1], 1, ae_v_len(i1,i)); - } - } + result = ae_false; + return result; +#else + return _ialglib_i_sparsegemvcrsmkl(opa, arows, acols, alpha, vals, cidx, ridx, x, ix, beta, y, iy); +#endif } +#endif +#if defined(AE_COMPILE_ABLASF) || !defined(AE_PARTIAL_BUILD) 
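The *mkl wrappers added above all follow one interception pattern: when ALGLIB_INTERCEPTS_MKL is not defined they return ae_false immediately, otherwise they forward to the corresponding _ialglib_i_* binding. Callers therefore offer the problem to the MKL kernel first and keep the portable ALGLIB code as a fallback (rmatrixinternalschurdecomposition further down does exactly this), and the ALGLIB_INTERCEPTS_ABLAS fast kernels that follow use the same convention. A minimal caller-side sketch, assuming d, e, z and zneeded are already set up as documented above (illustrative only, not taken from the patch):

    /* Illustrative dispatch sketch (not part of the patch): try the MKL
     * kernel first, fall back to generic code when it reports ae_false. */
    ae_bool evdresult;
    if( smatrixtdevdmkl(d, e, n, zneeded, z, &evdresult, _state) )
    {
        /* MKL was compiled in and handled the request; evdresult
         * carries the MKL completion code. */
        return evdresult;
    }
    /* ...otherwise continue with the portable tridiagonal EVD code... */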
/************************************************************************* -Application of a sequence of elementary rotations to a matrix - -The algorithm pre-multiplies the matrix by a sequence of rotation -transformations which is given by arrays C and S. Depending on the value -of the IsForward parameter either 1 and 2, 3 and 4 and so on (if IsForward=true) -rows are rotated, or the rows N and N-1, N-2 and N-3 and so on, are rotated. - -Not the whole matrix but only a part of it is transformed (rows from M1 to -M2, columns from N1 to N2). Only the elements of this submatrix are changed. - -Input parameters: - IsForward - the sequence of the rotation application. - M1,M2 - the range of rows to be transformed. - N1, N2 - the range of columns to be transformed. - C,S - transformation coefficients. - Array whose index ranges within [1..M2-M1]. - A - processed matrix. - WORK - working array whose index ranges within [N1..N2]. - -Output parameters: - A - transformed matrix. +Fast kernel -Utility subroutine. + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey *************************************************************************/ -void applyrotationsfromtheleft(ae_bool isforward, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Real */ ae_vector* c, - /* Real */ ae_vector* s, +ae_bool rmatrixgerf(ae_int_t m, + ae_int_t n, /* Real */ ae_matrix* a, - /* Real */ ae_vector* work, + ae_int_t ia, + ae_int_t ja, + double ralpha, + /* Real */ ae_vector* u, + ae_int_t iu, + /* Real */ ae_vector* v, + ae_int_t iv, ae_state *_state) { - ae_int_t j; - ae_int_t jp1; - double ctemp; - double stemp; - double temp; +#ifndef ALGLIB_INTERCEPTS_ABLAS + ae_bool result; - if( m1>m2||n1>n2 ) - { - return; - } - - /* - * Form P * A - */ - if( isforward ) - { - if( n1!=n2 ) - { - - /* - * Common case: N1<>N2 - */ - for(j=m1; j<=m2-1; j++) - { - ctemp = c->ptr.p_double[j-m1+1]; - stemp = s->ptr.p_double[j-m1+1]; - if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) - { - jp1 = j+1; - ae_v_moved(&work->ptr.p_double[n1], 1, &a->ptr.pp_double[jp1][n1], 1, ae_v_len(n1,n2), ctemp); - ae_v_subd(&work->ptr.p_double[n1], 1, &a->ptr.pp_double[j][n1], 1, ae_v_len(n1,n2), stemp); - ae_v_muld(&a->ptr.pp_double[j][n1], 1, ae_v_len(n1,n2), ctemp); - ae_v_addd(&a->ptr.pp_double[j][n1], 1, &a->ptr.pp_double[jp1][n1], 1, ae_v_len(n1,n2), stemp); - ae_v_move(&a->ptr.pp_double[jp1][n1], 1, &work->ptr.p_double[n1], 1, ae_v_len(n1,n2)); - } - } - } - else - { - - /* - * Special case: N1=N2 - */ - for(j=m1; j<=m2-1; j++) - { - ctemp = c->ptr.p_double[j-m1+1]; - stemp = s->ptr.p_double[j-m1+1]; - if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) - { - temp = a->ptr.pp_double[j+1][n1]; - a->ptr.pp_double[j+1][n1] = ctemp*temp-stemp*a->ptr.pp_double[j][n1]; - a->ptr.pp_double[j][n1] = stemp*temp+ctemp*a->ptr.pp_double[j][n1]; - } - } - } - } - else - { - if( n1!=n2 ) - { - - /* - * Common case: N1<>N2 - */ - for(j=m2-1; j>=m1; j--) - { - ctemp = c->ptr.p_double[j-m1+1]; - stemp = s->ptr.p_double[j-m1+1]; - if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) - { - jp1 = j+1; - ae_v_moved(&work->ptr.p_double[n1], 1, &a->ptr.pp_double[jp1][n1], 1, ae_v_len(n1,n2), ctemp); - ae_v_subd(&work->ptr.p_double[n1], 1, &a->ptr.pp_double[j][n1], 1, ae_v_len(n1,n2), stemp); - ae_v_muld(&a->ptr.pp_double[j][n1], 1, ae_v_len(n1,n2), ctemp); - ae_v_addd(&a->ptr.pp_double[j][n1], 1, &a->ptr.pp_double[jp1][n1], 1, ae_v_len(n1,n2), stemp); - ae_v_move(&a->ptr.pp_double[jp1][n1], 1, 
&work->ptr.p_double[n1], 1, ae_v_len(n1,n2)); - } - } - } - else - { - - /* - * Special case: N1=N2 - */ - for(j=m2-1; j>=m1; j--) - { - ctemp = c->ptr.p_double[j-m1+1]; - stemp = s->ptr.p_double[j-m1+1]; - if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) - { - temp = a->ptr.pp_double[j+1][n1]; - a->ptr.pp_double[j+1][n1] = ctemp*temp-stemp*a->ptr.pp_double[j][n1]; - a->ptr.pp_double[j][n1] = stemp*temp+ctemp*a->ptr.pp_double[j][n1]; - } - } - } - } + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixgerf(m, n, a, ia, ja, ralpha, u, iu, v, iv); +#endif } /************************************************************************* -Application of a sequence of elementary rotations to a matrix - -The algorithm post-multiplies the matrix by a sequence of rotation -transformations which is given by arrays C and S. Depending on the value -of the IsForward parameter either 1 and 2, 3 and 4 and so on (if IsForward=true) -rows are rotated, or the rows N and N-1, N-2 and N-3 and so on are rotated. - -Not the whole matrix but only a part of it is transformed (rows from M1 -to M2, columns from N1 to N2). Only the elements of this submatrix are changed. - -Input parameters: - IsForward - the sequence of the rotation application. - M1,M2 - the range of rows to be transformed. - N1, N2 - the range of columns to be transformed. - C,S - transformation coefficients. - Array whose index ranges within [1..N2-N1]. - A - processed matrix. - WORK - working array whose index ranges within [M1..M2]. - -Output parameters: - A - transformed matrix. +Fast kernel -Utility subroutine. + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey *************************************************************************/ -void applyrotationsfromtheright(ae_bool isforward, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Real */ ae_vector* c, - /* Real */ ae_vector* s, - /* Real */ ae_matrix* a, - /* Real */ ae_vector* work, +ae_bool cmatrixrank1f(ae_int_t m, + ae_int_t n, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Complex */ ae_vector* u, + ae_int_t iu, + /* Complex */ ae_vector* v, + ae_int_t iv, ae_state *_state) { - ae_int_t j; - ae_int_t jp1; - double ctemp; - double stemp; - double temp; +#ifndef ALGLIB_INTERCEPTS_ABLAS + ae_bool result; - - /* - * Form A * P' - */ - if( isforward ) - { - if( m1!=m2 ) - { - - /* - * Common case: M1<>M2 - */ - for(j=n1; j<=n2-1; j++) - { - ctemp = c->ptr.p_double[j-n1+1]; - stemp = s->ptr.p_double[j-n1+1]; - if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) - { - jp1 = j+1; - ae_v_moved(&work->ptr.p_double[m1], 1, &a->ptr.pp_double[m1][jp1], a->stride, ae_v_len(m1,m2), ctemp); - ae_v_subd(&work->ptr.p_double[m1], 1, &a->ptr.pp_double[m1][j], a->stride, ae_v_len(m1,m2), stemp); - ae_v_muld(&a->ptr.pp_double[m1][j], a->stride, ae_v_len(m1,m2), ctemp); - ae_v_addd(&a->ptr.pp_double[m1][j], a->stride, &a->ptr.pp_double[m1][jp1], a->stride, ae_v_len(m1,m2), stemp); - ae_v_move(&a->ptr.pp_double[m1][jp1], a->stride, &work->ptr.p_double[m1], 1, ae_v_len(m1,m2)); - } - } - } - else - { - - /* - * Special case: M1=M2 - */ - for(j=n1; j<=n2-1; j++) - { - ctemp = c->ptr.p_double[j-n1+1]; - stemp = s->ptr.p_double[j-n1+1]; - if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) - { - temp = a->ptr.pp_double[m1][j+1]; - a->ptr.pp_double[m1][j+1] = ctemp*temp-stemp*a->ptr.pp_double[m1][j]; - a->ptr.pp_double[m1][j] = stemp*temp+ctemp*a->ptr.pp_double[m1][j]; - } - } - } - } - else - { - if( m1!=m2 ) - { - - 
/* - * Common case: M1<>M2 - */ - for(j=n2-1; j>=n1; j--) - { - ctemp = c->ptr.p_double[j-n1+1]; - stemp = s->ptr.p_double[j-n1+1]; - if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) - { - jp1 = j+1; - ae_v_moved(&work->ptr.p_double[m1], 1, &a->ptr.pp_double[m1][jp1], a->stride, ae_v_len(m1,m2), ctemp); - ae_v_subd(&work->ptr.p_double[m1], 1, &a->ptr.pp_double[m1][j], a->stride, ae_v_len(m1,m2), stemp); - ae_v_muld(&a->ptr.pp_double[m1][j], a->stride, ae_v_len(m1,m2), ctemp); - ae_v_addd(&a->ptr.pp_double[m1][j], a->stride, &a->ptr.pp_double[m1][jp1], a->stride, ae_v_len(m1,m2), stemp); - ae_v_move(&a->ptr.pp_double[m1][jp1], a->stride, &work->ptr.p_double[m1], 1, ae_v_len(m1,m2)); - } - } - } - else - { - - /* - * Special case: M1=M2 - */ - for(j=n2-1; j>=n1; j--) - { - ctemp = c->ptr.p_double[j-n1+1]; - stemp = s->ptr.p_double[j-n1+1]; - if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) - { - temp = a->ptr.pp_double[m1][j+1]; - a->ptr.pp_double[m1][j+1] = ctemp*temp-stemp*a->ptr.pp_double[m1][j]; - a->ptr.pp_double[m1][j] = stemp*temp+ctemp*a->ptr.pp_double[m1][j]; - } - } - } - } + result = ae_false; + return result; +#else + return _ialglib_i_cmatrixrank1f(m, n, a, ia, ja, u, iu, v, iv); +#endif } /************************************************************************* -The subroutine generates the elementary rotation, so that: - -[ CS SN ] . [ F ] = [ R ] -[ -SN CS ] [ G ] [ 0 ] +Fast kernel -CS**2 + SN**2 = 1 + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey *************************************************************************/ -void generaterotation(double f, - double g, - double* cs, - double* sn, - double* r, - ae_state *_state) -{ - double f1; - double g1; +ae_bool rmatrixrank1f(ae_int_t m, + ae_int_t n, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_vector* u, + ae_int_t iu, + /* Real */ ae_vector* v, + ae_int_t iv, + ae_state *_state) +{ +#ifndef ALGLIB_INTERCEPTS_ABLAS + ae_bool result; - *cs = 0; - *sn = 0; - *r = 0; - if( ae_fp_eq(g,(double)(0)) ) - { - *cs = (double)(1); - *sn = (double)(0); - *r = f; - } - else - { - if( ae_fp_eq(f,(double)(0)) ) - { - *cs = (double)(0); - *sn = (double)(1); - *r = g; - } - else - { - f1 = f; - g1 = g; - if( ae_fp_greater(ae_fabs(f1, _state),ae_fabs(g1, _state)) ) - { - *r = ae_fabs(f1, _state)*ae_sqrt(1+ae_sqr(g1/f1, _state), _state); - } - else - { - *r = ae_fabs(g1, _state)*ae_sqrt(1+ae_sqr(f1/g1, _state), _state); - } - *cs = f1/(*r); - *sn = g1/(*r); - if( ae_fp_greater(ae_fabs(f, _state),ae_fabs(g, _state))&&ae_fp_less(*cs,(double)(0)) ) - { - *cs = -*cs; - *sn = -*sn; - *r = -*r; - } - } - } + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixrank1f(m, n, a, ia, ja, u, iu, v, iv); +#endif } +/************************************************************************* +Fast kernel - -void rmatrixinternalschurdecomposition(/* Real */ ae_matrix* h, + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey +*************************************************************************/ +ae_bool cmatrixrighttrsmf(ae_int_t m, ae_int_t n, - ae_int_t tneeded, - ae_int_t zneeded, - /* Real */ ae_vector* wr, - /* Real */ ae_vector* wi, - /* Real */ ae_matrix* z, - ae_int_t* info, + /* Complex */ ae_matrix* a, + ae_int_t i1, + ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Complex */ ae_matrix* x, + ae_int_t i2, + ae_int_t j2, ae_state *_state) { - ae_frame _frame_block; - ae_int_t i; - ae_int_t j; - ae_matrix h1; - ae_matrix z1; - 
ae_vector wr1; - ae_vector wi1; +#ifndef ALGLIB_INTERCEPTS_ABLAS + ae_bool result; - ae_frame_make(_state, &_frame_block); - ae_vector_clear(wr); - ae_vector_clear(wi); - *info = 0; - ae_matrix_init(&h1, 0, 0, DT_REAL, _state); - ae_matrix_init(&z1, 0, 0, DT_REAL, _state); - ae_vector_init(&wr1, 0, DT_REAL, _state); - ae_vector_init(&wi1, 0, DT_REAL, _state); - - /* - * Allocate space - */ - ae_vector_set_length(wr, n, _state); - ae_vector_set_length(wi, n, _state); - if( zneeded==2 ) - { - rmatrixsetlengthatleast(z, n, n, _state); - } - - /* - * MKL version - */ - if( rmatrixinternalschurdecompositionmkl(h, n, tneeded, zneeded, wr, wi, z, info, _state) ) - { - ae_frame_leave(_state); - return; - } - - /* - * ALGLIB version - */ - ae_matrix_set_length(&h1, n+1, n+1, _state); - for(i=0; i<=n-1; i++) - { - for(j=0; j<=n-1; j++) - { - h1.ptr.pp_double[1+i][1+j] = h->ptr.pp_double[i][j]; - } - } - if( zneeded==1 ) - { - ae_matrix_set_length(&z1, n+1, n+1, _state); - for(i=0; i<=n-1; i++) - { - for(j=0; j<=n-1; j++) - { - z1.ptr.pp_double[1+i][1+j] = z->ptr.pp_double[i][j]; - } - } - } - internalschurdecomposition(&h1, n, tneeded, zneeded, &wr1, &wi1, &z1, info, _state); - for(i=0; i<=n-1; i++) - { - wr->ptr.p_double[i] = wr1.ptr.p_double[i+1]; - wi->ptr.p_double[i] = wi1.ptr.p_double[i+1]; - } - if( tneeded!=0 ) - { - for(i=0; i<=n-1; i++) - { - for(j=0; j<=n-1; j++) - { - h->ptr.pp_double[i][j] = h1.ptr.pp_double[1+i][1+j]; - } - } - } - if( zneeded!=0 ) - { - rmatrixsetlengthatleast(z, n, n, _state); - for(i=0; i<=n-1; i++) - { - for(j=0; j<=n-1; j++) - { - z->ptr.pp_double[i][j] = z1.ptr.pp_double[1+i][1+j]; - } - } - } - ae_frame_leave(_state); + result = ae_false; + return result; +#else + return _ialglib_i_cmatrixrighttrsmf(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); +#endif } /************************************************************************* -Subroutine performing the Schur decomposition of a matrix in upper -Hessenberg form using the QR algorithm with multiple shifts. +Fast kernel + + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey +*************************************************************************/ +ae_bool cmatrixlefttrsmf(ae_int_t m, + ae_int_t n, + /* Complex */ ae_matrix* a, + ae_int_t i1, + ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Complex */ ae_matrix* x, + ae_int_t i2, + ae_int_t j2, + ae_state *_state) +{ +#ifndef ALGLIB_INTERCEPTS_ABLAS + ae_bool result; -The source matrix H is represented as S'*H*S = T, where H - matrix in -upper Hessenberg form, S - orthogonal matrix (Schur vectors), T - upper -quasi-triangular matrix (with blocks of sizes 1x1 and 2x2 on the main -diagonal). -Input parameters: - H - matrix to be decomposed. - Array whose indexes range within [1..N, 1..N]. - N - size of H, N>=0. + result = ae_false; + return result; +#else + return _ialglib_i_cmatrixlefttrsmf(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); +#endif +} -Output parameters: - H – contains the matrix T. - Array whose indexes range within [1..N, 1..N]. - All elements below the blocks on the main diagonal are equal - to 0. - S - contains Schur vectors. - Array whose indexes range within [1..N, 1..N]. - -Note 1: - The block structure of matrix T could be easily recognized: since all - the elements below the blocks are zeros, the elements a[i+1,i] which - are equal to 0 show the block border. 
- -Note 2: - the algorithm performance depends on the value of the internal - parameter NS of InternalSchurDecomposition subroutine which defines - the number of shifts in the QR algorithm (analog of the block width - in block matrix algorithms in linear algebra). If you require maximum - performance on your machine, it is recommended to adjust this - parameter manually. - -Result: - True, if the algorithm has converged and the parameters H and S contain - the result. - False, if the algorithm has not converged. +/************************************************************************* +Fast kernel -Algorithm implemented on the basis of subroutine DHSEQR (LAPACK 3.0 library). + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey *************************************************************************/ -ae_bool upperhessenbergschurdecomposition(/* Real */ ae_matrix* h, +ae_bool rmatrixrighttrsmf(ae_int_t m, ae_int_t n, - /* Real */ ae_matrix* s, + /* Real */ ae_matrix* a, + ae_int_t i1, + ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Real */ ae_matrix* x, + ae_int_t i2, + ae_int_t j2, ae_state *_state) { - ae_frame _frame_block; - ae_vector wi; - ae_vector wr; - ae_int_t info; +#ifndef ALGLIB_INTERCEPTS_ABLAS ae_bool result; - ae_frame_make(_state, &_frame_block); - ae_matrix_clear(s); - ae_vector_init(&wi, 0, DT_REAL, _state); - ae_vector_init(&wr, 0, DT_REAL, _state); - internalschurdecomposition(h, n, 1, 2, &wr, &wi, s, &info, _state); - result = info==0; - ae_frame_leave(_state); + result = ae_false; return result; +#else + return _ialglib_i_rmatrixrighttrsmf(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); +#endif } -void internalschurdecomposition(/* Real */ ae_matrix* h, +/************************************************************************* +Fast kernel + + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixlefttrsmf(ae_int_t m, ae_int_t n, - ae_int_t tneeded, - ae_int_t zneeded, - /* Real */ ae_vector* wr, - /* Real */ ae_vector* wi, - /* Real */ ae_matrix* z, - ae_int_t* info, + /* Real */ ae_matrix* a, + ae_int_t i1, + ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Real */ ae_matrix* x, + ae_int_t i2, + ae_int_t j2, ae_state *_state) { - ae_frame _frame_block; - ae_vector work; - ae_int_t i; - ae_int_t i1; - ae_int_t i2; - ae_int_t ierr; - ae_int_t ii; - ae_int_t itemp; - ae_int_t itn; - ae_int_t its; - ae_int_t j; - ae_int_t k; - ae_int_t l; - ae_int_t maxb; - ae_int_t nr; - ae_int_t ns; - ae_int_t nv; - double absw; - double smlnum; - double tau; - double temp; - double tst1; - double ulp; - double unfl; - ae_matrix s; - ae_vector v; - ae_vector vv; - ae_vector workc1; - ae_vector works1; - ae_vector workv3; - ae_vector tmpwr; - ae_vector tmpwi; - ae_bool initz; - ae_bool wantt; - ae_bool wantz; - double cnst; - ae_bool failflag; - ae_int_t p1; - ae_int_t p2; - double vt; +#ifndef ALGLIB_INTERCEPTS_ABLAS + ae_bool result; - ae_frame_make(_state, &_frame_block); - ae_vector_clear(wr); - ae_vector_clear(wi); - *info = 0; - ae_vector_init(&work, 0, DT_REAL, _state); - ae_matrix_init(&s, 0, 0, DT_REAL, _state); - ae_vector_init(&v, 0, DT_REAL, _state); - ae_vector_init(&vv, 0, DT_REAL, _state); - ae_vector_init(&workc1, 0, DT_REAL, _state); - ae_vector_init(&works1, 0, DT_REAL, _state); - ae_vector_init(&workv3, 0, DT_REAL, _state); - ae_vector_init(&tmpwr, 0, DT_REAL, _state); - ae_vector_init(&tmpwi, 0, DT_REAL, _state); 
- - /* - * Set the order of the multi-shift QR algorithm to be used. - * If you want to tune algorithm, change this values - */ - ns = 12; - maxb = 50; - - /* - * Now 2 < NS <= MAXB < NH. - */ - maxb = ae_maxint(3, maxb, _state); - ns = ae_minint(maxb, ns, _state); - - /* - * Initialize - */ - cnst = 1.5; - ae_vector_set_length(&work, ae_maxint(n, 1, _state)+1, _state); - ae_matrix_set_length(&s, ns+1, ns+1, _state); - ae_vector_set_length(&v, ns+1+1, _state); - ae_vector_set_length(&vv, ns+1+1, _state); - ae_vector_set_length(wr, ae_maxint(n, 1, _state)+1, _state); - ae_vector_set_length(wi, ae_maxint(n, 1, _state)+1, _state); - ae_vector_set_length(&workc1, 1+1, _state); - ae_vector_set_length(&works1, 1+1, _state); - ae_vector_set_length(&workv3, 3+1, _state); - ae_vector_set_length(&tmpwr, ae_maxint(n, 1, _state)+1, _state); - ae_vector_set_length(&tmpwi, ae_maxint(n, 1, _state)+1, _state); - ae_assert(n>=0, "InternalSchurDecomposition: incorrect N!", _state); - ae_assert(tneeded==0||tneeded==1, "InternalSchurDecomposition: incorrect TNeeded!", _state); - ae_assert((zneeded==0||zneeded==1)||zneeded==2, "InternalSchurDecomposition: incorrect ZNeeded!", _state); - wantt = tneeded==1; - initz = zneeded==2; - wantz = zneeded!=0; - *info = 0; - - /* - * Initialize Z, if necessary - */ - if( initz ) - { - rmatrixsetlengthatleast(z, n+1, n+1, _state); - for(i=1; i<=n; i++) - { - for(j=1; j<=n; j++) - { - if( i==j ) - { - z->ptr.pp_double[i][j] = (double)(1); - } - else - { - z->ptr.pp_double[i][j] = (double)(0); - } - } - } - } - - /* - * Quick return if possible - */ - if( n==0 ) - { - ae_frame_leave(_state); - return; - } - if( n==1 ) - { - wr->ptr.p_double[1] = h->ptr.pp_double[1][1]; - wi->ptr.p_double[1] = (double)(0); - ae_frame_leave(_state); - return; - } - - /* - * Set rows and columns 1 to N to zero below the first - * subdiagonal. - */ - for(j=1; j<=n-2; j++) - { - for(i=j+2; i<=n; i++) - { - h->ptr.pp_double[i][j] = (double)(0); - } - } - - /* - * Test if N is sufficiently small - */ - if( (ns<=2||ns>n)||maxb>=n ) - { - - /* - * Use the standard double-shift algorithm - */ - hsschur_internalauxschur(wantt, wantz, n, 1, n, h, wr, wi, 1, n, z, &work, &workv3, &workc1, &works1, info, _state); - - /* - * fill entries under diagonal blocks of T with zeros - */ - if( wantt ) - { - j = 1; - while(j<=n) - { - if( ae_fp_eq(wi->ptr.p_double[j],(double)(0)) ) - { - for(i=j+1; i<=n; i++) - { - h->ptr.pp_double[i][j] = (double)(0); - } - j = j+1; - } - else - { - for(i=j+2; i<=n; i++) - { - h->ptr.pp_double[i][j] = (double)(0); - h->ptr.pp_double[i][j+1] = (double)(0); - } - j = j+2; - } - } - } - ae_frame_leave(_state); - return; - } - unfl = ae_minrealnumber; - ulp = 2*ae_machineepsilon; - smlnum = unfl*(n/ulp); - - /* - * I1 and I2 are the indices of the first row and last column of H - * to which transformations must be applied. If eigenvalues only are - * being computed, I1 and I2 are set inside the main loop. - */ - i1 = 1; - i2 = n; - - /* - * ITN is the total number of multiple-shift QR iterations allowed. - */ - itn = 30*n; - - /* - * The main loop begins here. I is the loop index and decreases from - * IHI to ILO in steps of at most MAXB. Each iteration of the loop - * works with the active submatrix in rows and columns L to I. - * Eigenvalues I+1 to IHI have already converged. Either L = ILO or - * H(L,L-1) is negligible so that the matrix splits. 
- */ - i = n; - for(;;) - { - l = 1; - if( i<1 ) - { - - /* - * fill entries under diagonal blocks of T with zeros - */ - if( wantt ) - { - j = 1; - while(j<=n) - { - if( ae_fp_eq(wi->ptr.p_double[j],(double)(0)) ) - { - for(i=j+1; i<=n; i++) - { - h->ptr.pp_double[i][j] = (double)(0); - } - j = j+1; - } - else - { - for(i=j+2; i<=n; i++) - { - h->ptr.pp_double[i][j] = (double)(0); - h->ptr.pp_double[i][j+1] = (double)(0); - } - j = j+2; - } - } - } - - /* - * Exit - */ - ae_frame_leave(_state); - return; - } - - /* - * Perform multiple-shift QR iterations on rows and columns ILO to I - * until a submatrix of order at most MAXB splits off at the bottom - * because a subdiagonal element has become negligible. - */ - failflag = ae_true; - for(its=0; its<=itn; its++) - { - - /* - * Look for a single small subdiagonal element. - */ - for(k=i; k>=l+1; k--) - { - tst1 = ae_fabs(h->ptr.pp_double[k-1][k-1], _state)+ae_fabs(h->ptr.pp_double[k][k], _state); - if( ae_fp_eq(tst1,(double)(0)) ) - { - tst1 = upperhessenberg1norm(h, l, i, l, i, &work, _state); - } - if( ae_fp_less_eq(ae_fabs(h->ptr.pp_double[k][k-1], _state),ae_maxreal(ulp*tst1, smlnum, _state)) ) - { - break; - } - } - l = k; - if( l>1 ) - { - - /* - * H(L,L-1) is negligible. - */ - h->ptr.pp_double[l][l-1] = (double)(0); - } - - /* - * Exit from loop if a submatrix of order <= MAXB has split off. - */ - if( l>=i-maxb+1 ) - { - failflag = ae_false; - break; - } - - /* - * Now the active submatrix is in rows and columns L to I. If - * eigenvalues only are being computed, only the active submatrix - * need be transformed. - */ - if( its==20||its==30 ) - { - - /* - * Exceptional shifts. - */ - for(ii=i-ns+1; ii<=i; ii++) - { - wr->ptr.p_double[ii] = cnst*(ae_fabs(h->ptr.pp_double[ii][ii-1], _state)+ae_fabs(h->ptr.pp_double[ii][ii], _state)); - wi->ptr.p_double[ii] = (double)(0); - } - } - else - { - - /* - * Use eigenvalues of trailing submatrix of order NS as shifts. - */ - copymatrix(h, i-ns+1, i, i-ns+1, i, &s, 1, ns, 1, ns, _state); - hsschur_internalauxschur(ae_false, ae_false, ns, 1, ns, &s, &tmpwr, &tmpwi, 1, ns, z, &work, &workv3, &workc1, &works1, &ierr, _state); - for(p1=1; p1<=ns; p1++) - { - wr->ptr.p_double[i-ns+p1] = tmpwr.ptr.p_double[p1]; - wi->ptr.p_double[i-ns+p1] = tmpwi.ptr.p_double[p1]; - } - if( ierr>0 ) - { - - /* - * If DLAHQR failed to compute all NS eigenvalues, use the - * unconverged diagonal elements as the remaining shifts. - */ - for(ii=1; ii<=ierr; ii++) - { - wr->ptr.p_double[i-ns+ii] = s.ptr.pp_double[ii][ii]; - wi->ptr.p_double[i-ns+ii] = (double)(0); - } - } - } - - /* - * Form the first column of (G-w(1)) (G-w(2)) . . . (G-w(ns)) - * where G is the Hessenberg submatrix H(L:I,L:I) and w is - * the vector of shifts (stored in WR and WI). The result is - * stored in the local array V. 
- */ - v.ptr.p_double[1] = (double)(1); - for(ii=2; ii<=ns+1; ii++) - { - v.ptr.p_double[ii] = (double)(0); - } - nv = 1; - for(j=i-ns+1; j<=i; j++) - { - if( ae_fp_greater_eq(wi->ptr.p_double[j],(double)(0)) ) - { - if( ae_fp_eq(wi->ptr.p_double[j],(double)(0)) ) - { - - /* - * real shift - */ - p1 = nv+1; - ae_v_move(&vv.ptr.p_double[1], 1, &v.ptr.p_double[1], 1, ae_v_len(1,p1)); - matrixvectormultiply(h, l, l+nv, l, l+nv-1, ae_false, &vv, 1, nv, 1.0, &v, 1, nv+1, -wr->ptr.p_double[j], _state); - nv = nv+1; - } - else - { - if( ae_fp_greater(wi->ptr.p_double[j],(double)(0)) ) - { - - /* - * complex conjugate pair of shifts - */ - p1 = nv+1; - ae_v_move(&vv.ptr.p_double[1], 1, &v.ptr.p_double[1], 1, ae_v_len(1,p1)); - matrixvectormultiply(h, l, l+nv, l, l+nv-1, ae_false, &v, 1, nv, 1.0, &vv, 1, nv+1, -2*wr->ptr.p_double[j], _state); - itemp = vectoridxabsmax(&vv, 1, nv+1, _state); - temp = 1/ae_maxreal(ae_fabs(vv.ptr.p_double[itemp], _state), smlnum, _state); - p1 = nv+1; - ae_v_muld(&vv.ptr.p_double[1], 1, ae_v_len(1,p1), temp); - absw = pythag2(wr->ptr.p_double[j], wi->ptr.p_double[j], _state); - temp = temp*absw*absw; - matrixvectormultiply(h, l, l+nv+1, l, l+nv, ae_false, &vv, 1, nv+1, 1.0, &v, 1, nv+2, temp, _state); - nv = nv+2; - } - } - - /* - * Scale V(1:NV) so that max(abs(V(i))) = 1. If V is zero, - * reset it to the unit vector. - */ - itemp = vectoridxabsmax(&v, 1, nv, _state); - temp = ae_fabs(v.ptr.p_double[itemp], _state); - if( ae_fp_eq(temp,(double)(0)) ) - { - v.ptr.p_double[1] = (double)(1); - for(ii=2; ii<=nv; ii++) - { - v.ptr.p_double[ii] = (double)(0); - } - } - else - { - temp = ae_maxreal(temp, smlnum, _state); - vt = 1/temp; - ae_v_muld(&v.ptr.p_double[1], 1, ae_v_len(1,nv), vt); - } - } - } - - /* - * Multiple-shift QR step - */ - for(k=l; k<=i-1; k++) - { - - /* - * The first iteration of this loop determines a reflection G - * from the vector V and applies it from left and right to H, - * thus creating a nonzero bulge below the subdiagonal. - * - * Each subsequent iteration determines a reflection G to - * restore the Hessenberg form in the (K-1)th column, and thus - * chases the bulge one step toward the bottom of the active - * submatrix. NR is the order of G. - */ - nr = ae_minint(ns+1, i-k+1, _state); - if( k>l ) - { - p1 = k-1; - p2 = k+nr-1; - ae_v_move(&v.ptr.p_double[1], 1, &h->ptr.pp_double[k][p1], h->stride, ae_v_len(1,nr)); - touchint(&p2, _state); - } - generatereflection(&v, nr, &tau, _state); - if( k>l ) - { - h->ptr.pp_double[k][k-1] = v.ptr.p_double[1]; - for(ii=k+1; ii<=i; ii++) - { - h->ptr.pp_double[ii][k-1] = (double)(0); - } - } - v.ptr.p_double[1] = (double)(1); - - /* - * Apply G from the left to transform the rows of the matrix in - * columns K to I2. - */ - applyreflectionfromtheleft(h, tau, &v, k, k+nr-1, k, i2, &work, _state); - - /* - * Apply G from the right to transform the columns of the - * matrix in rows I1 to min(K+NR,I). - */ - applyreflectionfromtheright(h, tau, &v, i1, ae_minint(k+nr, i, _state), k, k+nr-1, &work, _state); - if( wantz ) - { - - /* - * Accumulate transformations in the matrix Z - */ - applyreflectionfromtheright(z, tau, &v, 1, n, k, k+nr-1, &work, _state); - } - } - } - - /* - * Failure to converge in remaining number of iterations - */ - if( failflag ) - { - *info = i; - ae_frame_leave(_state); - return; - } - - /* - * A submatrix of order <= MAXB in rows and columns L to I has split - * off. Use the double-shift QR algorithm to handle it. 
- */ - hsschur_internalauxschur(wantt, wantz, n, l, i, h, wr, wi, 1, n, z, &work, &workv3, &workc1, &works1, info, _state); - if( *info>0 ) - { - ae_frame_leave(_state); - return; - } - - /* - * Decrement number of remaining iterations, and return to start of - * the main loop with a new value of I. - */ - itn = itn-its; - i = l-1; - } - ae_frame_leave(_state); + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixlefttrsmf(m, n, a, i1, j1, isupper, isunit, optype, x, i2, j2); +#endif } -static void hsschur_internalauxschur(ae_bool wantt, - ae_bool wantz, - ae_int_t n, - ae_int_t ilo, - ae_int_t ihi, - /* Real */ ae_matrix* h, - /* Real */ ae_vector* wr, - /* Real */ ae_vector* wi, - ae_int_t iloz, - ae_int_t ihiz, - /* Real */ ae_matrix* z, - /* Real */ ae_vector* work, - /* Real */ ae_vector* workv3, - /* Real */ ae_vector* workc1, - /* Real */ ae_vector* works1, - ae_int_t* info, +/************************************************************************* +Fast kernel + + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey +*************************************************************************/ +ae_bool cmatrixherkf(ae_int_t n, + ae_int_t k, + double alpha, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + double beta, + /* Complex */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_bool isupper, ae_state *_state) { - ae_int_t i; - ae_int_t i1; - ae_int_t i2; - ae_int_t itn; - ae_int_t its; - ae_int_t j; - ae_int_t k; - ae_int_t l; - ae_int_t m; - ae_int_t nh; - ae_int_t nr; - ae_int_t nz; - double ave; - double cs; - double disc; - double h00; - double h10; - double h11; - double h12; - double h21; - double h22; - double h33; - double h33s; - double h43h34; - double h44; - double h44s; - double s; - double smlnum; - double sn; - double sum; - double t1; - double t2; - double t3; - double tst1; - double unfl; - double v1; - double v2; - double v3; - ae_bool failflag; - double dat1; - double dat2; - ae_int_t p1; - double him1im1; - double him1i; - double hiim1; - double hii; - double wrim1; - double wri; - double wiim1; - double wii; - double ulp; +#ifndef ALGLIB_INTERCEPTS_ABLAS + ae_bool result; + + + result = ae_false; + return result; +#else + return _ialglib_i_cmatrixherkf(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); +#endif +} + + +/************************************************************************* +Fast kernel + + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixsyrkf(ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_bool isupper, + ae_state *_state) +{ +#ifndef ALGLIB_INTERCEPTS_ABLAS + ae_bool result; + + + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixsyrkf(n, k, alpha, a, ia, ja, optypea, beta, c, ic, jc, isupper); +#endif +} + + +/************************************************************************* +Fast kernel + + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixgemmf(ae_int_t m, + ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + ae_int_t optypeb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t 
jc, + ae_state *_state) +{ +#ifndef ALGLIB_INTERCEPTS_ABLAS + ae_bool result; + + + result = ae_false; + return result; +#else + return _ialglib_i_rmatrixgemmf(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); +#endif +} + + +/************************************************************************* +Fast kernel + + -- ALGLIB routine -- + 19.01.2010 + Bochkanov Sergey +*************************************************************************/ +ae_bool cmatrixgemmf(ae_int_t m, + ae_int_t n, + ae_int_t k, + ae_complex alpha, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + /* Complex */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + ae_int_t optypeb, + ae_complex beta, + /* Complex */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state) +{ +#ifndef ALGLIB_INTERCEPTS_ABLAS + ae_bool result; + + + result = ae_false; + return result; +#else + return _ialglib_i_cmatrixgemmf(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc); +#endif +} + + +/************************************************************************* +CMatrixGEMM kernel, basecase code for CMatrixGEMM. + +This subroutine calculates C = alpha*op1(A)*op2(B) +beta*C where: +* C is MxN general matrix +* op1(A) is MxK matrix +* op2(B) is KxN matrix +* "op" may be identity transformation, transposition, conjugate transposition + +Additional info: +* multiplication result replaces C. If Beta=0, C elements are not used in + calculations (not multiplied by zero - just not referenced) +* if Alpha=0, A is not used (not multiplied by zero - just not referenced) +* if both Beta and Alpha are zero, C is filled by zeros. + +IMPORTANT: + +This function does NOT preallocate output matrix C, it MUST be preallocated +by caller prior to calling this function. In case C does not have enough +space to store result, exception will be generated. 
+ +INPUT PARAMETERS + M - matrix size, M>0 + N - matrix size, N>0 + K - matrix size, K>0 + Alpha - coefficient + A - matrix + IA - submatrix offset + JA - submatrix offset + OpTypeA - transformation type: + * 0 - no transformation + * 1 - transposition + * 2 - conjugate transposition + B - matrix + IB - submatrix offset + JB - submatrix offset + OpTypeB - transformation type: + * 0 - no transformation + * 1 - transposition + * 2 - conjugate transposition + Beta - coefficient + C - PREALLOCATED output matrix + IC - submatrix offset + JC - submatrix offset + + -- ALGLIB routine -- + 27.03.2013 + Bochkanov Sergey +*************************************************************************/ +void cmatrixgemmk(ae_int_t m, + ae_int_t n, + ae_int_t k, + ae_complex alpha, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + /* Complex */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + ae_int_t optypeb, + ae_complex beta, + /* Complex */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state) +{ + ae_int_t i; + ae_int_t j; + ae_complex v; + ae_complex v00; + ae_complex v01; + ae_complex v10; + ae_complex v11; + double v00x; + double v00y; + double v01x; + double v01y; + double v10x; + double v10y; + double v11x; + double v11y; + double a0x; + double a0y; + double a1x; + double a1y; + double b0x; + double b0y; + double b1x; + double b1y; + ae_int_t idxa0; + ae_int_t idxa1; + ae_int_t idxb0; + ae_int_t idxb1; + ae_int_t i0; + ae_int_t i1; + ae_int_t ik; + ae_int_t j0; + ae_int_t j1; + ae_int_t jk; + ae_int_t t; + ae_int_t offsa; + ae_int_t offsb; - *info = 0; - *info = 0; - dat1 = 0.75; - dat2 = -0.4375; - ulp = ae_machineepsilon; - - /* - * Quick return if possible - */ - if( n==0 ) - { - return; - } - if( ilo==ihi ) - { - wr->ptr.p_double[ilo] = h->ptr.pp_double[ilo][ilo]; - wi->ptr.p_double[ilo] = (double)(0); - return; - } - nh = ihi-ilo+1; - nz = ihiz-iloz+1; - - /* - * Set machine-dependent constants for the stopping criterion. - * If norm(H) <= sqrt(MaxRealNumber), overflow should not occur. - */ - unfl = ae_minrealnumber; - smlnum = unfl*(nh/ulp); /* - * I1 and I2 are the indices of the first row and last column of H - * to which transformations must be applied. If eigenvalues only are - * being computed, I1 and I2 are set inside the main loop. + * if matrix size is zero */ - i1 = 1; - i2 = n; + if( m==0||n==0 ) + { + return; + } /* - * ITN is the total number of QR iterations allowed. + * Try optimized code */ - itn = 30*nh; + if( cmatrixgemmf(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc, _state) ) + { + return; + } /* - * The main loop begins here. I is the loop index and decreases from - * IHI to ILO in steps of 1 or 2. Each iteration of the loop works - * with the active submatrix in rows and columns L to I. - * Eigenvalues I+1 to IHI have already converged. Either L = ILO or - * H(L,L-1) is negligible so that the matrix splits. 
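Given the parameter list documented above, a hypothetical call that forms C := A*B for 2x2 complex submatrices stored at offset (0,0) could look as follows; a, b and c are assumed to be preallocated 2x2 complex ae_matrix objects and _state a valid ae_state, since cmatrixgemmk never allocates its output:

    /* Hypothetical usage sketch (not part of the patch): C := 1*A*B + 0*C,
     * i.e. the plain product of the 2x2 blocks starting at row 0, column 0.
     * a, b, c are assumed to be preallocated 2x2 complex matrices. */
    cmatrixgemmk(2, 2, 2,
        ae_complex_from_d(1.0),   /* Alpha */
        &a, 0, 0, 0,              /* A, IA, JA, OpTypeA=0: no transformation */
        &b, 0, 0, 0,              /* B, IB, JB, OpTypeB=0: no transformation */
        ae_complex_from_i(0),     /* Beta=0: existing C entries are not read */
        &c, 0, 0,                 /* C (preallocated), IC, JC */
        _state);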
+ * if K=0 or Alpha=0, then C=Beta*C */ - i = ihi; - for(;;) + if( k==0||ae_c_eq_d(alpha,(double)(0)) ) { - l = ilo; - if( i=l+1; k--) + if( ae_c_neq_d(beta,(double)(0)) ) { - tst1 = ae_fabs(h->ptr.pp_double[k-1][k-1], _state)+ae_fabs(h->ptr.pp_double[k][k], _state); - if( ae_fp_eq(tst1,(double)(0)) ) + for(i=0; i<=m-1; i++) { - tst1 = upperhessenberg1norm(h, l, i, l, i, work, _state); + for(j=0; j<=n-1; j++) + { + c->ptr.pp_complex[ic+i][jc+j] = ae_c_mul(beta,c->ptr.pp_complex[ic+i][jc+j]); + } } - if( ae_fp_less_eq(ae_fabs(h->ptr.pp_double[k][k-1], _state),ae_maxreal(ulp*tst1, smlnum, _state)) ) - { - break; - } - } - l = k; - if( l>ilo ) - { - - /* - * H(L,L-1) is negligible - */ - h->ptr.pp_double[l][l-1] = (double)(0); - } - - /* - * Exit from loop if a submatrix of order 1 or 2 has split off. - */ - if( l>=i-1 ) - { - failflag = ae_false; - break; - } - - /* - * Now the active submatrix is in rows and columns L to I. If - * eigenvalues only are being computed, only the active submatrix - * need be transformed. - */ - if( its==10||its==20 ) - { - - /* - * Exceptional shift. - */ - s = ae_fabs(h->ptr.pp_double[i][i-1], _state)+ae_fabs(h->ptr.pp_double[i-1][i-2], _state); - h44 = dat1*s+h->ptr.pp_double[i][i]; - h33 = h44; - h43h34 = dat2*s*s; } else { - - /* - * Prepare to use Francis' double shift - * (i.e. 2nd degree generalized Rayleigh quotient) - */ - h44 = h->ptr.pp_double[i][i]; - h33 = h->ptr.pp_double[i-1][i-1]; - h43h34 = h->ptr.pp_double[i][i-1]*h->ptr.pp_double[i-1][i]; - s = h->ptr.pp_double[i-1][i-2]*h->ptr.pp_double[i-1][i-2]; - disc = (h33-h44)*0.5; - disc = disc*disc+h43h34; - if( ae_fp_greater(disc,(double)(0)) ) + for(i=0; i<=m-1; i++) { - - /* - * Real roots: use Wilkinson's shift twice - */ - disc = ae_sqrt(disc, _state); - ave = 0.5*(h33+h44); - if( ae_fp_greater(ae_fabs(h33, _state)-ae_fabs(h44, _state),(double)(0)) ) - { - h33 = h33*h44-h43h34; - h44 = h33/(hsschur_extschursign(disc, ave, _state)+ave); - } - else + for(j=0; j<=n-1; j++) { - h44 = hsschur_extschursign(disc, ave, _state)+ave; + c->ptr.pp_complex[ic+i][jc+j] = ae_complex_from_i(0); } - h33 = h44; - h43h34 = (double)(0); } } + } + return; + } + + /* + * This phase is not really necessary, but compiler complains + * about "possibly uninitialized variables" + */ + a0x = (double)(0); + a0y = (double)(0); + a1x = (double)(0); + a1y = (double)(0); + b0x = (double)(0); + b0y = (double)(0); + b1x = (double)(0); + b1y = (double)(0); + + /* + * General case + */ + i = 0; + while(i=l; m--) + if( i+2<=m&&j+2<=n ) { /* - * Determine the effect of starting the double-shift QR - * iteration at row M, and see if this would make H(M,M-1) - * negligible. + * Specialized 4x4 code for [I..I+3]x[J..J+3] submatrix of C. + * + * This submatrix is calculated as sum of K rank-1 products, + * with operands cached in local variables in order to speed + * up operations with arrays. 
*/ - h11 = h->ptr.pp_double[m][m]; - h22 = h->ptr.pp_double[m+1][m+1]; - h21 = h->ptr.pp_double[m+1][m]; - h12 = h->ptr.pp_double[m][m+1]; - h44s = h44-h11; - h33s = h33-h11; - v1 = (h33s*h44s-h43h34)/h21+h12; - v2 = h22-h11-h33s-h44s; - v3 = h->ptr.pp_double[m+2][m+1]; - s = ae_fabs(v1, _state)+ae_fabs(v2, _state)+ae_fabs(v3, _state); - v1 = v1/s; - v2 = v2/s; - v3 = v3/s; - workv3->ptr.p_double[1] = v1; - workv3->ptr.p_double[2] = v2; - workv3->ptr.p_double[3] = v3; - if( m==l ) + v00x = 0.0; + v00y = 0.0; + v01x = 0.0; + v01y = 0.0; + v10x = 0.0; + v10y = 0.0; + v11x = 0.0; + v11y = 0.0; + if( optypea==0 ) { - break; + idxa0 = ia+i+0; + idxa1 = ia+i+1; + offsa = ja; } - h00 = h->ptr.pp_double[m-1][m-1]; - h10 = h->ptr.pp_double[m][m-1]; - tst1 = ae_fabs(v1, _state)*(ae_fabs(h00, _state)+ae_fabs(h11, _state)+ae_fabs(h22, _state)); - if( ae_fp_less_eq(ae_fabs(h10, _state)*(ae_fabs(v2, _state)+ae_fabs(v3, _state)),ulp*tst1) ) + else { - break; + idxa0 = ja+i+0; + idxa1 = ja+i+1; + offsa = ia; } - } - - /* - * Double-shift QR step - */ - for(k=m; k<=i-1; k++) - { - - /* - * The first iteration of this loop determines a reflection G - * from the vector V and applies it from left and right to H, - * thus creating a nonzero bulge below the subdiagonal. - * - * Each subsequent iteration determines a reflection G to - * restore the Hessenberg form in the (K-1)th column, and thus - * chases the bulge one step toward the bottom of the active - * submatrix. NR is the order of G. - */ - nr = ae_minint(3, i-k+1, _state); - if( k>m ) + if( optypeb==0 ) { - for(p1=1; p1<=nr; p1++) - { - workv3->ptr.p_double[p1] = h->ptr.pp_double[k+p1-1][k-1]; - } + idxb0 = jb+j+0; + idxb1 = jb+j+1; + offsb = ib; } - generatereflection(workv3, nr, &t1, _state); - if( k>m ) + else { - h->ptr.pp_double[k][k-1] = workv3->ptr.p_double[1]; - h->ptr.pp_double[k+1][k-1] = (double)(0); - if( kptr.pp_double[k+2][k-1] = (double)(0); - } + idxb0 = ib+j+0; + idxb1 = ib+j+1; + offsb = jb; } - else + for(t=0; t<=k-1; t++) { - if( m>l ) + if( optypea==0 ) { - h->ptr.pp_double[k][k-1] = -h->ptr.pp_double[k][k-1]; + a0x = a->ptr.pp_complex[idxa0][offsa].x; + a0y = a->ptr.pp_complex[idxa0][offsa].y; + a1x = a->ptr.pp_complex[idxa1][offsa].x; + a1y = a->ptr.pp_complex[idxa1][offsa].y; } - } - v2 = workv3->ptr.p_double[2]; - t2 = t1*v2; - if( nr==3 ) - { - v3 = workv3->ptr.p_double[3]; - t3 = t1*v3; - - /* - * Apply G from the left to transform the rows of the matrix - * in columns K to I2. - */ - for(j=k; j<=i2; j++) + if( optypea==1 ) { - sum = h->ptr.pp_double[k][j]+v2*h->ptr.pp_double[k+1][j]+v3*h->ptr.pp_double[k+2][j]; - h->ptr.pp_double[k][j] = h->ptr.pp_double[k][j]-sum*t1; - h->ptr.pp_double[k+1][j] = h->ptr.pp_double[k+1][j]-sum*t2; - h->ptr.pp_double[k+2][j] = h->ptr.pp_double[k+2][j]-sum*t3; + a0x = a->ptr.pp_complex[offsa][idxa0].x; + a0y = a->ptr.pp_complex[offsa][idxa0].y; + a1x = a->ptr.pp_complex[offsa][idxa1].x; + a1y = a->ptr.pp_complex[offsa][idxa1].y; } - - /* - * Apply G from the right to transform the columns of the - * matrix in rows I1 to min(K+3,I). 
- */ - for(j=i1; j<=ae_minint(k+3, i, _state); j++) + if( optypea==2 ) + { + a0x = a->ptr.pp_complex[offsa][idxa0].x; + a0y = -a->ptr.pp_complex[offsa][idxa0].y; + a1x = a->ptr.pp_complex[offsa][idxa1].x; + a1y = -a->ptr.pp_complex[offsa][idxa1].y; + } + if( optypeb==0 ) { - sum = h->ptr.pp_double[j][k]+v2*h->ptr.pp_double[j][k+1]+v3*h->ptr.pp_double[j][k+2]; - h->ptr.pp_double[j][k] = h->ptr.pp_double[j][k]-sum*t1; - h->ptr.pp_double[j][k+1] = h->ptr.pp_double[j][k+1]-sum*t2; - h->ptr.pp_double[j][k+2] = h->ptr.pp_double[j][k+2]-sum*t3; + b0x = b->ptr.pp_complex[offsb][idxb0].x; + b0y = b->ptr.pp_complex[offsb][idxb0].y; + b1x = b->ptr.pp_complex[offsb][idxb1].x; + b1y = b->ptr.pp_complex[offsb][idxb1].y; } - if( wantz ) + if( optypeb==1 ) { - - /* - * Accumulate transformations in the matrix Z - */ - for(j=iloz; j<=ihiz; j++) - { - sum = z->ptr.pp_double[j][k]+v2*z->ptr.pp_double[j][k+1]+v3*z->ptr.pp_double[j][k+2]; - z->ptr.pp_double[j][k] = z->ptr.pp_double[j][k]-sum*t1; - z->ptr.pp_double[j][k+1] = z->ptr.pp_double[j][k+1]-sum*t2; - z->ptr.pp_double[j][k+2] = z->ptr.pp_double[j][k+2]-sum*t3; - } + b0x = b->ptr.pp_complex[idxb0][offsb].x; + b0y = b->ptr.pp_complex[idxb0][offsb].y; + b1x = b->ptr.pp_complex[idxb1][offsb].x; + b1y = b->ptr.pp_complex[idxb1][offsb].y; + } + if( optypeb==2 ) + { + b0x = b->ptr.pp_complex[idxb0][offsb].x; + b0y = -b->ptr.pp_complex[idxb0][offsb].y; + b1x = b->ptr.pp_complex[idxb1][offsb].x; + b1y = -b->ptr.pp_complex[idxb1][offsb].y; } + v00x = v00x+a0x*b0x-a0y*b0y; + v00y = v00y+a0x*b0y+a0y*b0x; + v01x = v01x+a0x*b1x-a0y*b1y; + v01y = v01y+a0x*b1y+a0y*b1x; + v10x = v10x+a1x*b0x-a1y*b0y; + v10y = v10y+a1x*b0y+a1y*b0x; + v11x = v11x+a1x*b1x-a1y*b1y; + v11y = v11y+a1x*b1y+a1y*b1x; + offsa = offsa+1; + offsb = offsb+1; + } + v00.x = v00x; + v00.y = v00y; + v10.x = v10x; + v10.y = v10y; + v01.x = v01x; + v01.y = v01y; + v11.x = v11x; + v11.y = v11y; + if( ae_c_eq_d(beta,(double)(0)) ) + { + c->ptr.pp_complex[ic+i+0][jc+j+0] = ae_c_mul(alpha,v00); + c->ptr.pp_complex[ic+i+0][jc+j+1] = ae_c_mul(alpha,v01); + c->ptr.pp_complex[ic+i+1][jc+j+0] = ae_c_mul(alpha,v10); + c->ptr.pp_complex[ic+i+1][jc+j+1] = ae_c_mul(alpha,v11); } else { - if( nr==2 ) - { - - /* - * Apply G from the left to transform the rows of the matrix - * in columns K to I2. - */ - for(j=k; j<=i2; j++) - { - sum = h->ptr.pp_double[k][j]+v2*h->ptr.pp_double[k+1][j]; - h->ptr.pp_double[k][j] = h->ptr.pp_double[k][j]-sum*t1; - h->ptr.pp_double[k+1][j] = h->ptr.pp_double[k+1][j]-sum*t2; - } - - /* - * Apply G from the right to transform the columns of the - * matrix in rows I1 to min(K+3,I). 
- */ - for(j=i1; j<=i; j++) - { - sum = h->ptr.pp_double[j][k]+v2*h->ptr.pp_double[j][k+1]; - h->ptr.pp_double[j][k] = h->ptr.pp_double[j][k]-sum*t1; - h->ptr.pp_double[j][k+1] = h->ptr.pp_double[j][k+1]-sum*t2; - } - if( wantz ) - { - - /* - * Accumulate transformations in the matrix Z - */ - for(j=iloz; j<=ihiz; j++) - { - sum = z->ptr.pp_double[j][k]+v2*z->ptr.pp_double[j][k+1]; - z->ptr.pp_double[j][k] = z->ptr.pp_double[j][k]-sum*t1; - z->ptr.pp_double[j][k+1] = z->ptr.pp_double[j][k+1]-sum*t2; - } - } - } + c->ptr.pp_complex[ic+i+0][jc+j+0] = ae_c_add(ae_c_mul(beta,c->ptr.pp_complex[ic+i+0][jc+j+0]),ae_c_mul(alpha,v00)); + c->ptr.pp_complex[ic+i+0][jc+j+1] = ae_c_add(ae_c_mul(beta,c->ptr.pp_complex[ic+i+0][jc+j+1]),ae_c_mul(alpha,v01)); + c->ptr.pp_complex[ic+i+1][jc+j+0] = ae_c_add(ae_c_mul(beta,c->ptr.pp_complex[ic+i+1][jc+j+0]),ae_c_mul(alpha,v10)); + c->ptr.pp_complex[ic+i+1][jc+j+1] = ae_c_add(ae_c_mul(beta,c->ptr.pp_complex[ic+i+1][jc+j+1]),ae_c_mul(alpha,v11)); } } - } - if( failflag ) - { - - /* - * Failure to converge in remaining number of iterations - */ - *info = i; - return; - } - if( l==i ) - { - - /* - * H(I,I-1) is negligible: one eigenvalue has converged. - */ - wr->ptr.p_double[i] = h->ptr.pp_double[i][i]; - wi->ptr.p_double[i] = (double)(0); - } - else - { - if( l==i-1 ) + else { /* - * H(I-1,I-2) is negligible: a pair of eigenvalues have converged. - * - * Transform the 2-by-2 submatrix to standard Schur form, - * and compute and store the eigenvalues. + * Determine submatrix [I0..I1]x[J0..J1] to process + */ + i0 = i; + i1 = ae_minint(i+1, m-1, _state); + j0 = j; + j1 = ae_minint(j+1, n-1, _state); + + /* + * Process submatrix */ - him1im1 = h->ptr.pp_double[i-1][i-1]; - him1i = h->ptr.pp_double[i-1][i]; - hiim1 = h->ptr.pp_double[i][i-1]; - hii = h->ptr.pp_double[i][i]; - hsschur_aux2x2schur(&him1im1, &him1i, &hiim1, &hii, &wrim1, &wiim1, &wri, &wii, &cs, &sn, _state); - wr->ptr.p_double[i-1] = wrim1; - wi->ptr.p_double[i-1] = wiim1; - wr->ptr.p_double[i] = wri; - wi->ptr.p_double[i] = wii; - h->ptr.pp_double[i-1][i-1] = him1im1; - h->ptr.pp_double[i-1][i] = him1i; - h->ptr.pp_double[i][i-1] = hiim1; - h->ptr.pp_double[i][i] = hii; - if( wantt ) + for(ik=i0; ik<=i1; ik++) { - - /* - * Apply the transformation to the rest of H. - */ - if( i2>i ) + for(jk=j0; jk<=j1; jk++) { - workc1->ptr.p_double[1] = cs; - works1->ptr.p_double[1] = sn; - applyrotationsfromtheleft(ae_true, i-1, i, i+1, i2, workc1, works1, h, work, _state); - } - workc1->ptr.p_double[1] = cs; - works1->ptr.p_double[1] = sn; - applyrotationsfromtheright(ae_true, i1, i-2, i-1, i, workc1, works1, h, work, _state); - } - if( wantz ) - { - - /* - * Apply the transformation to Z. - */ - workc1->ptr.p_double[1] = cs; - works1->ptr.p_double[1] = sn; - applyrotationsfromtheright(ae_true, iloz, iloz+nz-1, i-1, i, workc1, works1, z, work, _state); - } - } - } - - /* - * Decrement number of remaining iterations, and return to start of - * the main loop with new value of I. 
- */ - itn = itn-its; - i = l-1; - } -} - - -static void hsschur_aux2x2schur(double* a, - double* b, - double* c, - double* d, - double* rt1r, - double* rt1i, - double* rt2r, - double* rt2i, - double* cs, - double* sn, - ae_state *_state) -{ - double multpl; - double aa; - double bb; - double bcmax; - double bcmis; - double cc; - double cs1; - double dd; - double eps; - double p; - double sab; - double sac; - double scl; - double sigma; - double sn1; - double tau; - double temp; - double z; - - *rt1r = 0; - *rt1i = 0; - *rt2r = 0; - *rt2i = 0; - *cs = 0; - *sn = 0; - - multpl = 4.0; - eps = ae_machineepsilon; - if( ae_fp_eq(*c,(double)(0)) ) - { - *cs = (double)(1); - *sn = (double)(0); - } - else - { - if( ae_fp_eq(*b,(double)(0)) ) - { - - /* - * Swap rows and columns - */ - *cs = (double)(0); - *sn = (double)(1); - temp = *d; - *d = *a; - *a = temp; - *b = -*c; - *c = (double)(0); - } - else - { - if( ae_fp_eq(*a-(*d),(double)(0))&&hsschur_extschursigntoone(*b, _state)!=hsschur_extschursigntoone(*c, _state) ) - { - *cs = (double)(1); - *sn = (double)(0); - } - else - { - temp = *a-(*d); - p = 0.5*temp; - bcmax = ae_maxreal(ae_fabs(*b, _state), ae_fabs(*c, _state), _state); - bcmis = ae_minreal(ae_fabs(*b, _state), ae_fabs(*c, _state), _state)*hsschur_extschursigntoone(*b, _state)*hsschur_extschursigntoone(*c, _state); - scl = ae_maxreal(ae_fabs(p, _state), bcmax, _state); - z = p/scl*p+bcmax/scl*bcmis; - - /* - * If Z is of the order of the machine accuracy, postpone the - * decision on the nature of eigenvalues - */ - if( ae_fp_greater_eq(z,multpl*eps) ) - { - - /* - * Real eigenvalues. Compute A and D. - */ - z = p+hsschur_extschursign(ae_sqrt(scl, _state)*ae_sqrt(z, _state), p, _state); - *a = *d+z; - *d = *d-bcmax/z*bcmis; - - /* - * Compute B and the rotation matrix - */ - tau = pythag2(*c, z, _state); - *cs = z/tau; - *sn = *c/tau; - *b = *b-(*c); - *c = (double)(0); - } - else - { - - /* - * Complex eigenvalues, or real (almost) equal eigenvalues. - * Make diagonal elements equal. 
- */ - sigma = *b+(*c); - tau = pythag2(sigma, temp, _state); - *cs = ae_sqrt(0.5*(1+ae_fabs(sigma, _state)/tau), _state); - *sn = -p/(tau*(*cs))*hsschur_extschursign((double)(1), sigma, _state); - - /* - * Compute [ AA BB ] = [ A B ] [ CS -SN ] - * [ CC DD ] [ C D ] [ SN CS ] - */ - aa = *a*(*cs)+*b*(*sn); - bb = -*a*(*sn)+*b*(*cs); - cc = *c*(*cs)+*d*(*sn); - dd = -*c*(*sn)+*d*(*cs); - - /* - * Compute [ A B ] = [ CS SN ] [ AA BB ] - * [ C D ] [-SN CS ] [ CC DD ] - */ - *a = aa*(*cs)+cc*(*sn); - *b = bb*(*cs)+dd*(*sn); - *c = -aa*(*sn)+cc*(*cs); - *d = -bb*(*sn)+dd*(*cs); - temp = 0.5*(*a+(*d)); - *a = temp; - *d = temp; - if( ae_fp_neq(*c,(double)(0)) ) - { - if( ae_fp_neq(*b,(double)(0)) ) + if( k==0||ae_c_eq_d(alpha,(double)(0)) ) + { + v = ae_complex_from_i(0); + } + else { - if( hsschur_extschursigntoone(*b, _state)==hsschur_extschursigntoone(*c, _state) ) + v = ae_complex_from_d(0.0); + if( optypea==0&&optypeb==0 ) { - - /* - * Real eigenvalues: reduce to upper triangular form - */ - sab = ae_sqrt(ae_fabs(*b, _state), _state); - sac = ae_sqrt(ae_fabs(*c, _state), _state); - p = hsschur_extschursign(sab*sac, *c, _state); - tau = 1/ae_sqrt(ae_fabs(*b+(*c), _state), _state); - *a = temp+p; - *d = temp-p; - *b = *b-(*c); - *c = (double)(0); - cs1 = sab*tau; - sn1 = sac*tau; - temp = *cs*cs1-*sn*sn1; - *sn = *cs*sn1+*sn*cs1; - *cs = temp; + v = ae_v_cdotproduct(&a->ptr.pp_complex[ia+ik][ja], 1, "N", &b->ptr.pp_complex[ib][jb+jk], b->stride, "N", ae_v_len(ja,ja+k-1)); + } + if( optypea==0&&optypeb==1 ) + { + v = ae_v_cdotproduct(&a->ptr.pp_complex[ia+ik][ja], 1, "N", &b->ptr.pp_complex[ib+jk][jb], 1, "N", ae_v_len(ja,ja+k-1)); + } + if( optypea==0&&optypeb==2 ) + { + v = ae_v_cdotproduct(&a->ptr.pp_complex[ia+ik][ja], 1, "N", &b->ptr.pp_complex[ib+jk][jb], 1, "Conj", ae_v_len(ja,ja+k-1)); + } + if( optypea==1&&optypeb==0 ) + { + v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "N", &b->ptr.pp_complex[ib][jb+jk], b->stride, "N", ae_v_len(ia,ia+k-1)); + } + if( optypea==1&&optypeb==1 ) + { + v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "N", &b->ptr.pp_complex[ib+jk][jb], 1, "N", ae_v_len(ia,ia+k-1)); + } + if( optypea==1&&optypeb==2 ) + { + v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "N", &b->ptr.pp_complex[ib+jk][jb], 1, "Conj", ae_v_len(ia,ia+k-1)); + } + if( optypea==2&&optypeb==0 ) + { + v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "Conj", &b->ptr.pp_complex[ib][jb+jk], b->stride, "N", ae_v_len(ia,ia+k-1)); + } + if( optypea==2&&optypeb==1 ) + { + v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "Conj", &b->ptr.pp_complex[ib+jk][jb], 1, "N", ae_v_len(ia,ia+k-1)); + } + if( optypea==2&&optypeb==2 ) + { + v = ae_v_cdotproduct(&a->ptr.pp_complex[ia][ja+ik], a->stride, "Conj", &b->ptr.pp_complex[ib+jk][jb], 1, "Conj", ae_v_len(ia,ia+k-1)); } } + if( ae_c_eq_d(beta,(double)(0)) ) + { + c->ptr.pp_complex[ic+ik][jc+jk] = ae_c_mul(alpha,v); + } else { - *b = -*c; - *c = (double)(0); - temp = *cs; - *cs = -*sn; - *sn = temp; + c->ptr.pp_complex[ic+ik][jc+jk] = ae_c_add(ae_c_mul(beta,c->ptr.pp_complex[ic+ik][jc+jk]),ae_c_mul(alpha,v)); } } } } + j = j+2; } + i = i+2; } - - /* - * Store eigenvalues in (RT1R,RT1I) and (RT2R,RT2I). 
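For the leftover row or column of C (when fewer than two remain), the routine above falls back to one complex dot product per element; the nine OpTypeA/OpTypeB combinations only change which operand is read with a stride and which one is conjugated. A stand-alone sketch of such a strided, optionally conjugated dot product is given below; it is plain C99 with illustrative names and is not the ae_v_cdotproduct API itself.

#include <stdio.h>

typedef struct { double x, y; } cpx;

/* Illustrative strided complex dot product: sum over t of
 * op(a[t*stridea]) * op(b[t*strideb]), where conja/conjb select the
 * conjugate, mirroring the "N"/"Conj" flags used above. */
static cpx cdot(int k, const cpx *a, int stridea, int conja,
                       const cpx *b, int strideb, int conjb)
{
    cpx s = {0, 0};
    for (int t = 0; t < k; t++) {
        double ax = a[t*stridea].x, ay = conja ? -a[t*stridea].y : a[t*stridea].y;
        double bx = b[t*strideb].x, by = conjb ? -b[t*strideb].y : b[t*strideb].y;
        s.x += ax*bx - ay*by;
        s.y += ax*by + ay*bx;
    }
    return s;
}

int main(void)
{
    cpx a[2] = {{1, 2}, {3, -1}}, b[2] = {{2, 1}, {0, 4}};
    cpx p = cdot(2, a, 1, 1, b, 1, 0);   /* conj(a) . b */
    printf("%g%+gi\n", p.x, p.y);        /* (1-2i)(2+i) + (3+i)(4i) = 0+9i */
    return 0;
}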
- */ - *rt1r = *a; - *rt2r = *d; - if( ae_fp_eq(*c,(double)(0)) ) - { - *rt1i = (double)(0); - *rt2i = (double)(0); - } - else - { - *rt1i = ae_sqrt(ae_fabs(*b, _state), _state)*ae_sqrt(ae_fabs(*c, _state), _state); - *rt2i = -*rt1i; - } -} - - -static double hsschur_extschursign(double a, double b, ae_state *_state) -{ - double result; - - - if( ae_fp_greater_eq(b,(double)(0)) ) - { - result = ae_fabs(a, _state); - } - else - { - result = -ae_fabs(a, _state); - } - return result; -} - - -static ae_int_t hsschur_extschursigntoone(double b, ae_state *_state) -{ - ae_int_t result; - - - if( ae_fp_greater_eq(b,(double)(0)) ) - { - result = 1; - } - else - { - result = -1; - } - return result; } - - /************************************************************************* -Utility subroutine performing the "safe" solution of system of linear -equations with triangular coefficient matrices. +RMatrixGEMM kernel, basecase code for RMatrixGEMM. -The subroutine uses scaling and solves the scaled system A*x=s*b (where s -is a scalar value) instead of A*x=b, choosing s so that x can be -represented by a floating-point number. The closer the system gets to a -singular, the less s is. If the system is singular, s=0 and x contains the -non-trivial solution of equation A*x=0. +This subroutine calculates C = alpha*op1(A)*op2(B) +beta*C where: +* C is MxN general matrix +* op1(A) is MxK matrix +* op2(B) is KxN matrix +* "op" may be identity transformation, transposition -The feature of an algorithm is that it could not cause an overflow or a -division by zero regardless of the matrix used as the input. +Additional info: +* multiplication result replaces C. If Beta=0, C elements are not used in + calculations (not multiplied by zero - just not referenced) +* if Alpha=0, A is not used (not multiplied by zero - just not referenced) +* if both Beta and Alpha are zero, C is filled by zeros. -The algorithm can solve systems of equations with upper/lower triangular -matrices, with/without unit diagonal, and systems of type A*x=b or A'*x=b -(where A' is a transposed matrix A). +IMPORTANT: -Input parameters: - A - system matrix. Array whose indexes range within [0..N-1, 0..N-1]. - N - size of matrix A. - X - right-hand member of a system. - Array whose index ranges within [0..N-1]. - IsUpper - matrix type. If it is True, the system matrix is the upper - triangular and is located in the corresponding part of - matrix A. - Trans - problem type. If it is True, the problem to be solved is - A'*x=b, otherwise it is A*x=b. - Isunit - matrix type. If it is True, the system matrix has a unit - diagonal (the elements on the main diagonal are not used - in the calculation process), otherwise the matrix is considered - to be a general triangular matrix. +This function does NOT preallocate output matrix C, it MUST be preallocated +by caller prior to calling this function. In case C does not have enough +space to store result, exception will be generated. -Output parameters: - X - solution. Array whose index ranges within [0..N-1]. - S - scaling factor. 
+INPUT PARAMETERS + M - matrix size, M>0 + N - matrix size, N>0 + K - matrix size, K>0 + Alpha - coefficient + A - matrix + IA - submatrix offset + JA - submatrix offset + OpTypeA - transformation type: + * 0 - no transformation + * 1 - transposition + B - matrix + IB - submatrix offset + JB - submatrix offset + OpTypeB - transformation type: + * 0 - no transformation + * 1 - transposition + Beta - coefficient + C - PREALLOCATED output matrix + IC - submatrix offset + JC - submatrix offset - -- LAPACK auxiliary routine (version 3.0) -- - Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., - Courant Institute, Argonne National Lab, and Rice University - June 30, 1992 + -- ALGLIB routine -- + 27.03.2013 + Bochkanov Sergey *************************************************************************/ -void rmatrixtrsafesolve(/* Real */ ae_matrix* a, +void rmatrixgemmk(ae_int_t m, ae_int_t n, - /* Real */ ae_vector* x, - double* s, - ae_bool isupper, - ae_bool istrans, - ae_bool isunit, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + ae_int_t optypeb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, ae_state *_state) { - ae_frame _frame_block; - ae_bool normin; - ae_vector cnorm; - ae_matrix a1; - ae_vector x1; ae_int_t i; + ae_int_t j; - ae_frame_make(_state, &_frame_block); - *s = 0; - ae_vector_init(&cnorm, 0, DT_REAL, _state); - ae_matrix_init(&a1, 0, 0, DT_REAL, _state); - ae_vector_init(&x1, 0, DT_REAL, _state); /* - * From 0-based to 1-based + * if matrix size is zero */ - normin = ae_false; - ae_matrix_set_length(&a1, n+1, n+1, _state); - ae_vector_set_length(&x1, n+1, _state); - for(i=1; i<=n; i++) + if( m==0||n==0 ) { - ae_v_move(&a1.ptr.pp_double[i][1], 1, &a->ptr.pp_double[i-1][0], 1, ae_v_len(1,n)); + return; } - ae_v_move(&x1.ptr.p_double[1], 1, &x->ptr.p_double[0], 1, ae_v_len(1,n)); /* - * Solve 1-based + * Try optimized code */ - safesolvetriangular(&a1, n, &x1, s, isupper, istrans, isunit, normin, &cnorm, _state); + if( rmatrixgemmf(m, n, k, alpha, a, ia, ja, optypea, b, ib, jb, optypeb, beta, c, ic, jc, _state) ) + { + return; + } /* - * From 1-based to 0-based + * if K=0 or Alpha=0, then C=Beta*C */ - ae_v_move(&x->ptr.p_double[0], 1, &x1.ptr.p_double[1], 1, ae_v_len(0,n-1)); - ae_frame_leave(_state); -} - - -/************************************************************************* -Obsolete 1-based subroutine. -See RMatrixTRSafeSolve for 0-based replacement. 
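The contract documented above (C = alpha*op1(A)*op2(B) + beta*C with op being identity or transposition, C preallocated by the caller, and C never read when Beta=0) can be summarized by a short reference implementation. The sketch below is plain C99 with a row-major layout and illustrative names (ref_gemm, lda/ldb/ldc); it mirrors the semantics only, not the blocked ALGLIB kernel.

#include <math.h>
#include <stdio.h>

/* Reference (unblocked) real GEMM with the same conventions as the kernel
 * documented above: opa/opb are 0 (identity) or 1 (transpose), C must be
 * preallocated, and for beta==0 the old contents of C are never read, so
 * even NaN garbage in C cannot leak into the result. */
static void ref_gemm(int m, int n, int k, double alpha,
                     const double *a, int lda, int opa,
                     const double *b, int ldb, int opb,
                     double beta, double *c, int ldc)
{
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++) {
            double v = 0;
            for (int t = 0; t < k; t++) {
                double ai = opa == 0 ? a[i*lda + t] : a[t*lda + i];
                double bj = opb == 0 ? b[t*ldb + j] : b[j*ldb + t];
                v += ai*bj;
            }
            c[i*ldc + j] = (beta == 0) ? alpha*v
                                       : beta*c[i*ldc + j] + alpha*v;
        }
}

int main(void)
{
    double a[4] = {1, 2, 3, 4};              /* 2x2 */
    double b[4] = {5, 6, 7, 8};              /* 2x2 */
    double c[4] = {NAN, NAN, NAN, NAN};      /* uninitialized is fine when beta=0 */
    ref_gemm(2, 2, 2, 1.0, a, 2, 0, b, 2, 1, 0.0, c, 2);   /* C = A*B' */
    printf("%g %g\n%g %g\n", c[0], c[1], c[2], c[3]);      /* 17 23 / 39 53 */
    return 0;
}

Because the beta==0 branch never loads the old C, a caller may pass freshly allocated, uninitialized storage, which is exactly why the note above stresses that C elements are "not multiplied by zero - just not referenced".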
-*************************************************************************/ -void safesolvetriangular(/* Real */ ae_matrix* a, - ae_int_t n, - /* Real */ ae_vector* x, - double* s, - ae_bool isupper, - ae_bool istrans, - ae_bool isunit, - ae_bool normin, - /* Real */ ae_vector* cnorm, - ae_state *_state) -{ - ae_int_t i; - ae_int_t imax; - ae_int_t j; - ae_int_t jfirst; - ae_int_t jinc; - ae_int_t jlast; - ae_int_t jm1; - ae_int_t jp1; - ae_int_t ip1; - ae_int_t im1; - ae_int_t k; - ae_int_t flg; - double v; - double vd; - double bignum; - double grow; - double rec; - double smlnum; - double sumj; - double tjj; - double tjjs; - double tmax; - double tscal; - double uscal; - double xbnd; - double xj; - double xmax; - ae_bool notran; - ae_bool upper; - ae_bool nounit; - - *s = 0; - - upper = isupper; - notran = !istrans; - nounit = !isunit; - - /* - * these initializers are not really necessary, - * but without them compiler complains about uninitialized locals - */ - tjjs = (double)(0); - - /* - * Quick return if possible - */ - if( n==0 ) - { - return; - } - - /* - * Determine machine dependent parameters to control overflow. - */ - smlnum = ae_minrealnumber/(ae_machineepsilon*2); - bignum = 1/smlnum; - *s = (double)(1); - if( !normin ) + if( k==0||ae_fp_eq(alpha,(double)(0)) ) { - ae_vector_set_length(cnorm, n+1, _state); - - /* - * Compute the 1-norm of each column, not including the diagonal. - */ - if( upper ) + if( ae_fp_neq(beta,(double)(1)) ) { - - /* - * A is upper triangular. - */ - for(j=1; j<=n; j++) + if( ae_fp_neq(beta,(double)(0)) ) { - v = (double)(0); - for(k=1; k<=j-1; k++) + for(i=0; i<=m-1; i++) { - v = v+ae_fabs(a->ptr.pp_double[k][j], _state); + for(j=0; j<=n-1; j++) + { + c->ptr.pp_double[ic+i][jc+j] = beta*c->ptr.pp_double[ic+i][jc+j]; + } } - cnorm->ptr.p_double[j] = v; } - } - else - { - - /* - * A is lower triangular. - */ - for(j=1; j<=n-1; j++) + else { - v = (double)(0); - for(k=j+1; k<=n; k++) + for(i=0; i<=m-1; i++) { - v = v+ae_fabs(a->ptr.pp_double[k][j], _state); + for(j=0; j<=n-1; j++) + { + c->ptr.pp_double[ic+i][jc+j] = (double)(0); + } } - cnorm->ptr.p_double[j] = v; } - cnorm->ptr.p_double[n] = (double)(0); } + return; } /* - * Scale the column norms by TSCAL if the maximum element in CNORM is - * greater than BIGNUM. + * Call specialized code. + * + * NOTE: specialized code was moved to separate function because of strange + * issues with instructions cache on some systems; Having too long + * functions significantly slows down internal loop of the algorithm. */ - imax = 1; - for(k=2; k<=n; k++) + if( optypea==0&&optypeb==0 ) { - if( ae_fp_greater(cnorm->ptr.p_double[k],cnorm->ptr.p_double[imax]) ) - { - imax = k; - } + rmatrixgemmk44v00(m, n, k, alpha, a, ia, ja, b, ib, jb, beta, c, ic, jc, _state); } - tmax = cnorm->ptr.p_double[imax]; - if( ae_fp_less_eq(tmax,bignum) ) + if( optypea==0&&optypeb!=0 ) { - tscal = (double)(1); + rmatrixgemmk44v01(m, n, k, alpha, a, ia, ja, b, ib, jb, beta, c, ic, jc, _state); } - else + if( optypea!=0&&optypeb==0 ) { - tscal = 1/(smlnum*tmax); - ae_v_muld(&cnorm->ptr.p_double[1], 1, ae_v_len(1,n), tscal); + rmatrixgemmk44v10(m, n, k, alpha, a, ia, ja, b, ib, jb, beta, c, ic, jc, _state); + } + if( optypea!=0&&optypeb!=0 ) + { + rmatrixgemmk44v11(m, n, k, alpha, a, ia, ja, b, ib, jb, beta, c, ic, jc, _state); } +} + + +/************************************************************************* +RMatrixGEMM kernel, basecase code for RMatrixGEMM, specialized for sitation +with OpTypeA=0 and OpTypeB=0. 
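The driver above selects one of four specialized kernels from the (OpTypeA, OpTypeB) pair, keeping each kernel short; the NOTE explains that overly long functions were observed to hurt the instruction cache. A sketch of the same dispatch pattern is shown below, with stub kernels and illustrative names only.

#include <stdio.h>

/* Illustrative dispatch on the (optypea, optypeb) pair: one small kernel per
 * combination, selected up front as in rmatrixgemmk above.  The kernels here
 * are stubs; in the real code each contains its own 4x4 inner loop. */
typedef void (*gemm_kernel)(int m, int n, int k);

static void k_nn(int m, int n, int k) { printf("A *B   %dx%dx%d\n", m, n, k); }
static void k_nt(int m, int n, int k) { printf("A *B'  %dx%dx%d\n", m, n, k); }
static void k_tn(int m, int n, int k) { printf("A'*B   %dx%dx%d\n", m, n, k); }
static void k_tt(int m, int n, int k) { printf("A'*B'  %dx%dx%d\n", m, n, k); }

static void gemm_dispatch(int m, int n, int k, int optypea, int optypeb)
{
    static const gemm_kernel tab[2][2] = { { k_nn, k_nt }, { k_tn, k_tt } };
    tab[optypea != 0][optypeb != 0](m, n, k);
}

int main(void)
{
    gemm_dispatch(16, 16, 8, 0, 1);   /* calls the A*B' kernel */
    return 0;
}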
+ +Additional info: +* this function requires that Alpha<>0 (assertion is thrown otherwise) + +INPUT PARAMETERS + M - matrix size, M>0 + N - matrix size, N>0 + K - matrix size, K>0 + Alpha - coefficient + A - matrix + IA - submatrix offset + JA - submatrix offset + B - matrix + IB - submatrix offset + JB - submatrix offset + Beta - coefficient + C - PREALLOCATED output matrix + IC - submatrix offset + JC - submatrix offset + + -- ALGLIB routine -- + 27.03.2013 + Bochkanov Sergey +*************************************************************************/ +void rmatrixgemmk44v00(ae_int_t m, + ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state) +{ + ae_int_t i; + ae_int_t j; + double v; + double v00; + double v01; + double v02; + double v03; + double v10; + double v11; + double v12; + double v13; + double v20; + double v21; + double v22; + double v23; + double v30; + double v31; + double v32; + double v33; + double a0; + double a1; + double a2; + double a3; + double b0; + double b1; + double b2; + double b3; + ae_int_t idxa0; + ae_int_t idxa1; + ae_int_t idxa2; + ae_int_t idxa3; + ae_int_t idxb0; + ae_int_t idxb1; + ae_int_t idxb2; + ae_int_t idxb3; + ae_int_t i0; + ae_int_t i1; + ae_int_t ik; + ae_int_t j0; + ae_int_t j1; + ae_int_t jk; + ae_int_t t; + ae_int_t offsa; + ae_int_t offsb; + + + ae_assert(ae_fp_neq(alpha,(double)(0)), "RMatrixGEMMK44V00: internal error (Alpha=0)", _state); /* - * Compute a bound on the computed solution vector to see if the - * Level 2 BLAS routine DTRSV can be used. + * if matrix size is zero */ - j = 1; - for(k=2; k<=n; k++) + if( m==0||n==0 ) { - if( ae_fp_greater(ae_fabs(x->ptr.p_double[k], _state),ae_fabs(x->ptr.p_double[j], _state)) ) - { - j = k; - } + return; } - xmax = ae_fabs(x->ptr.p_double[j], _state); - xbnd = xmax; - if( notran ) - { - - /* - * Compute the growth in A * x = b. - */ - if( upper ) - { - jfirst = n; - jlast = 1; - jinc = -1; - } - else - { - jfirst = 1; - jlast = n; - jinc = 1; - } - if( ae_fp_neq(tscal,(double)(1)) ) - { - grow = (double)(0); - } - else + + /* + * A*B + */ + i = 0; + while(i0&&j<=jlast)||(jinc<0&&j>=jlast)) + idxa0 = ia+i+0; + idxa1 = ia+i+1; + idxa2 = ia+i+2; + idxa3 = ia+i+3; + offsa = ja; + idxb0 = jb+j+0; + idxb1 = jb+j+1; + idxb2 = jb+j+2; + idxb3 = jb+j+3; + offsb = ib; + v00 = 0.0; + v01 = 0.0; + v02 = 0.0; + v03 = 0.0; + v10 = 0.0; + v11 = 0.0; + v12 = 0.0; + v13 = 0.0; + v20 = 0.0; + v21 = 0.0; + v22 = 0.0; + v23 = 0.0; + v30 = 0.0; + v31 = 0.0; + v32 = 0.0; + v33 = 0.0; + + /* + * Different variants of internal loop + */ + for(t=0; t<=k-1; t++) { - - /* - * Exit the loop if the growth factor is too small. - */ - if( ae_fp_less_eq(grow,smlnum) ) - { - break; - } - - /* - * M(j) = G(j-1) / abs(A(j,j)) - */ - tjj = ae_fabs(a->ptr.pp_double[j][j], _state); - xbnd = ae_minreal(xbnd, ae_minreal((double)(1), tjj, _state)*grow, _state); - if( ae_fp_greater_eq(tjj+cnorm->ptr.p_double[j],smlnum) ) - { - - /* - * G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) - */ - grow = grow*(tjj/(tjj+cnorm->ptr.p_double[j])); - } - else - { - - /* - * G(j) could overflow, set GROW to 0. 
- */ - grow = (double)(0); - } - if( j==jlast ) - { - grow = xbnd; - } - j = j+jinc; + a0 = a->ptr.pp_double[idxa0][offsa]; + a1 = a->ptr.pp_double[idxa1][offsa]; + b0 = b->ptr.pp_double[offsb][idxb0]; + b1 = b->ptr.pp_double[offsb][idxb1]; + v00 = v00+a0*b0; + v01 = v01+a0*b1; + v10 = v10+a1*b0; + v11 = v11+a1*b1; + a2 = a->ptr.pp_double[idxa2][offsa]; + a3 = a->ptr.pp_double[idxa3][offsa]; + v20 = v20+a2*b0; + v21 = v21+a2*b1; + v30 = v30+a3*b0; + v31 = v31+a3*b1; + b2 = b->ptr.pp_double[offsb][idxb2]; + b3 = b->ptr.pp_double[offsb][idxb3]; + v22 = v22+a2*b2; + v23 = v23+a2*b3; + v32 = v32+a3*b2; + v33 = v33+a3*b3; + v02 = v02+a0*b2; + v03 = v03+a0*b3; + v12 = v12+a1*b2; + v13 = v13+a1*b3; + offsa = offsa+1; + offsb = offsb+1; + } + if( ae_fp_eq(beta,(double)(0)) ) + { + c->ptr.pp_double[ic+i+0][jc+j+0] = alpha*v00; + c->ptr.pp_double[ic+i+0][jc+j+1] = alpha*v01; + c->ptr.pp_double[ic+i+0][jc+j+2] = alpha*v02; + c->ptr.pp_double[ic+i+0][jc+j+3] = alpha*v03; + c->ptr.pp_double[ic+i+1][jc+j+0] = alpha*v10; + c->ptr.pp_double[ic+i+1][jc+j+1] = alpha*v11; + c->ptr.pp_double[ic+i+1][jc+j+2] = alpha*v12; + c->ptr.pp_double[ic+i+1][jc+j+3] = alpha*v13; + c->ptr.pp_double[ic+i+2][jc+j+0] = alpha*v20; + c->ptr.pp_double[ic+i+2][jc+j+1] = alpha*v21; + c->ptr.pp_double[ic+i+2][jc+j+2] = alpha*v22; + c->ptr.pp_double[ic+i+2][jc+j+3] = alpha*v23; + c->ptr.pp_double[ic+i+3][jc+j+0] = alpha*v30; + c->ptr.pp_double[ic+i+3][jc+j+1] = alpha*v31; + c->ptr.pp_double[ic+i+3][jc+j+2] = alpha*v32; + c->ptr.pp_double[ic+i+3][jc+j+3] = alpha*v33; + } + else + { + c->ptr.pp_double[ic+i+0][jc+j+0] = beta*c->ptr.pp_double[ic+i+0][jc+j+0]+alpha*v00; + c->ptr.pp_double[ic+i+0][jc+j+1] = beta*c->ptr.pp_double[ic+i+0][jc+j+1]+alpha*v01; + c->ptr.pp_double[ic+i+0][jc+j+2] = beta*c->ptr.pp_double[ic+i+0][jc+j+2]+alpha*v02; + c->ptr.pp_double[ic+i+0][jc+j+3] = beta*c->ptr.pp_double[ic+i+0][jc+j+3]+alpha*v03; + c->ptr.pp_double[ic+i+1][jc+j+0] = beta*c->ptr.pp_double[ic+i+1][jc+j+0]+alpha*v10; + c->ptr.pp_double[ic+i+1][jc+j+1] = beta*c->ptr.pp_double[ic+i+1][jc+j+1]+alpha*v11; + c->ptr.pp_double[ic+i+1][jc+j+2] = beta*c->ptr.pp_double[ic+i+1][jc+j+2]+alpha*v12; + c->ptr.pp_double[ic+i+1][jc+j+3] = beta*c->ptr.pp_double[ic+i+1][jc+j+3]+alpha*v13; + c->ptr.pp_double[ic+i+2][jc+j+0] = beta*c->ptr.pp_double[ic+i+2][jc+j+0]+alpha*v20; + c->ptr.pp_double[ic+i+2][jc+j+1] = beta*c->ptr.pp_double[ic+i+2][jc+j+1]+alpha*v21; + c->ptr.pp_double[ic+i+2][jc+j+2] = beta*c->ptr.pp_double[ic+i+2][jc+j+2]+alpha*v22; + c->ptr.pp_double[ic+i+2][jc+j+3] = beta*c->ptr.pp_double[ic+i+2][jc+j+3]+alpha*v23; + c->ptr.pp_double[ic+i+3][jc+j+0] = beta*c->ptr.pp_double[ic+i+3][jc+j+0]+alpha*v30; + c->ptr.pp_double[ic+i+3][jc+j+1] = beta*c->ptr.pp_double[ic+i+3][jc+j+1]+alpha*v31; + c->ptr.pp_double[ic+i+3][jc+j+2] = beta*c->ptr.pp_double[ic+i+3][jc+j+2]+alpha*v32; + c->ptr.pp_double[ic+i+3][jc+j+3] = beta*c->ptr.pp_double[ic+i+3][jc+j+3]+alpha*v33; } } else { /* - * A is unit triangular. - * - * Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. + * Determine submatrix [I0..I1]x[J0..J1] to process */ - grow = ae_minreal((double)(1), 1/ae_maxreal(xbnd, smlnum, _state), _state); - j = jfirst; - while((jinc>0&&j<=jlast)||(jinc<0&&j>=jlast)) + i0 = i; + i1 = ae_minint(i+3, m-1, _state); + j0 = j; + j1 = ae_minint(j+3, n-1, _state); + + /* + * Process submatrix + */ + for(ik=i0; ik<=i1; ik++) { - - /* - * Exit the loop if the growth factor is too small. 
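The inner loop above keeps the whole 4x4 block of C in local accumulators (v00..v33) and reloads only four elements of A and four of B per step. Below is a trimmed stand-alone sketch of that register-blocked micro-kernel for the no-transposition, Beta=0 case; it is plain C99, row-major, with illustrative names (gemm4x4_nn, lda/ldb/ldc), and uses an accumulator array where the ALGLIB code uses individually named scalars to encourage register allocation.

#include <stdio.h>

/* Illustrative 4x4 register-blocked micro-kernel for
 * C[0..3][0..3] = alpha * A[0..3][0..k-1] * B[0..k-1][0..3] (beta = 0 case),
 * structured like rmatrixgemmk44v00 above: the 4x4 block of C stays in local
 * accumulators and each step loads four elements of A and four of B. */
static void gemm4x4_nn(int k, double alpha,
                       const double *a, int lda,
                       const double *b, int ldb,
                       double *c, int ldc)
{
    double v[4][4] = {{0}};
    for (int t = 0; t < k; t++) {
        double a0 = a[0*lda + t], a1 = a[1*lda + t];
        double a2 = a[2*lda + t], a3 = a[3*lda + t];
        double b0 = b[t*ldb + 0], b1 = b[t*ldb + 1];
        double b2 = b[t*ldb + 2], b3 = b[t*ldb + 3];
        v[0][0] += a0*b0; v[0][1] += a0*b1; v[0][2] += a0*b2; v[0][3] += a0*b3;
        v[1][0] += a1*b0; v[1][1] += a1*b1; v[1][2] += a1*b2; v[1][3] += a1*b3;
        v[2][0] += a2*b0; v[2][1] += a2*b1; v[2][2] += a2*b2; v[2][3] += a2*b3;
        v[3][0] += a3*b0; v[3][1] += a3*b1; v[3][2] += a3*b2; v[3][3] += a3*b3;
    }
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            c[i*ldc + j] = alpha*v[i][j];
}

int main(void)
{
    double a[4], b[4], c[16];
    for (int t = 0; t < 4; t++) { a[t] = t + 1; b[t] = 1; }  /* 4x1 times 1x4 */
    gemm4x4_nn(1, 1.0, a, 1, b, 4, c, 4);
    for (int i = 0; i < 4; i++)
        printf("%g %g %g %g\n", c[i*4], c[i*4+1], c[i*4+2], c[i*4+3]);
    return 0;
}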
- */ - if( ae_fp_less_eq(grow,smlnum) ) + for(jk=j0; jk<=j1; jk++) { - break; + if( k==0||ae_fp_eq(alpha,(double)(0)) ) + { + v = (double)(0); + } + else + { + v = ae_v_dotproduct(&a->ptr.pp_double[ia+ik][ja], 1, &b->ptr.pp_double[ib][jb+jk], b->stride, ae_v_len(ja,ja+k-1)); + } + if( ae_fp_eq(beta,(double)(0)) ) + { + c->ptr.pp_double[ic+ik][jc+jk] = alpha*v; + } + else + { + c->ptr.pp_double[ic+ik][jc+jk] = beta*c->ptr.pp_double[ic+ik][jc+jk]+alpha*v; + } } - - /* - * G(j) = G(j-1)*( 1 + CNORM(j) ) - */ - grow = grow*(1/(1+cnorm->ptr.p_double[j])); - j = j+jinc; } } + j = j+4; } + i = i+4; } - else - { - - /* - * Compute the growth in A' * x = b. - */ - if( upper ) - { - jfirst = 1; - jlast = n; - jinc = 1; - } - else - { - jfirst = n; - jlast = 1; - jinc = -1; - } - if( ae_fp_neq(tscal,(double)(1)) ) - { - grow = (double)(0); - } - else - { - if( nounit ) - { - - /* - * A is non-unit triangular. - * - * Compute GROW = 1/G(j) and XBND = 1/M(j). - * Initially, M(0) = max{x(i), i=1,...,n}. - */ - grow = 1/ae_maxreal(xbnd, smlnum, _state); - xbnd = grow; - j = jfirst; - while((jinc>0&&j<=jlast)||(jinc<0&&j>=jlast)) - { - - /* - * Exit the loop if the growth factor is too small. - */ - if( ae_fp_less_eq(grow,smlnum) ) - { - break; - } - - /* - * G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) - */ - xj = 1+cnorm->ptr.p_double[j]; - grow = ae_minreal(grow, xbnd/xj, _state); - - /* - * M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) - */ - tjj = ae_fabs(a->ptr.pp_double[j][j], _state); - if( ae_fp_greater(xj,tjj) ) - { - xbnd = xbnd*(tjj/xj); - } - if( j==jlast ) - { - grow = ae_minreal(grow, xbnd, _state); - } - j = j+jinc; - } - } - else +} + + +/************************************************************************* +RMatrixGEMM kernel, basecase code for RMatrixGEMM, specialized for sitation +with OpTypeA=0 and OpTypeB=1. 
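With OpTypeB=1, column j of op2(B) is row j of B, so every element of C becomes a dot product of one row of A with one row of B, and both operands are traversed with unit stride (this is what the idxb0=ib+j / offsb=jb setup in the A*B' kernel that follows encodes). A tiny sketch of that scalar path, plain C99 with illustrative names:

#include <stdio.h>

/* Illustrative scalar path of the A*B' kernel: C[i][j] is the dot product of
 * row i of A with row j of B, both read with unit stride.  Row-major layout,
 * leading dimensions lda/ldb. */
static double dot_rows(int k, const double *a, int lda, int i,
                              const double *b, int ldb, int j)
{
    double v = 0;
    for (int t = 0; t < k; t++)
        v += a[i*lda + t] * b[j*ldb + t];
    return v;
}

int main(void)
{
    double a[6] = {1, 2, 3, 4, 5, 6};   /* 2x3 */
    double b[6] = {1, 0, 1, 2, 1, 0};   /* 2x3, used as B' (3x2) */
    printf("%g\n", dot_rows(3, a, 3, 0, b, 3, 1));   /* (A*B')[0][1] = 1*2+2*1+3*0 = 4 */
    return 0;
}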
+ +Additional info: +* this function requires that Alpha<>0 (assertion is thrown otherwise) + +INPUT PARAMETERS + M - matrix size, M>0 + N - matrix size, N>0 + K - matrix size, K>0 + Alpha - coefficient + A - matrix + IA - submatrix offset + JA - submatrix offset + B - matrix + IB - submatrix offset + JB - submatrix offset + Beta - coefficient + C - PREALLOCATED output matrix + IC - submatrix offset + JC - submatrix offset + + -- ALGLIB routine -- + 27.03.2013 + Bochkanov Sergey +*************************************************************************/ +void rmatrixgemmk44v01(ae_int_t m, + ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state) +{ + ae_int_t i; + ae_int_t j; + double v; + double v00; + double v01; + double v02; + double v03; + double v10; + double v11; + double v12; + double v13; + double v20; + double v21; + double v22; + double v23; + double v30; + double v31; + double v32; + double v33; + double a0; + double a1; + double a2; + double a3; + double b0; + double b1; + double b2; + double b3; + ae_int_t idxa0; + ae_int_t idxa1; + ae_int_t idxa2; + ae_int_t idxa3; + ae_int_t idxb0; + ae_int_t idxb1; + ae_int_t idxb2; + ae_int_t idxb3; + ae_int_t i0; + ae_int_t i1; + ae_int_t ik; + ae_int_t j0; + ae_int_t j1; + ae_int_t jk; + ae_int_t t; + ae_int_t offsa; + ae_int_t offsb; + + + ae_assert(ae_fp_neq(alpha,(double)(0)), "RMatrixGEMMK44V00: internal error (Alpha=0)", _state); + + /* + * if matrix size is zero + */ + if( m==0||n==0 ) + { + return; + } + + /* + * A*B' + */ + i = 0; + while(i0&&j<=jlast)||(jinc<0&&j>=jlast)) + idxa0 = ia+i+0; + idxa1 = ia+i+1; + idxa2 = ia+i+2; + idxa3 = ia+i+3; + offsa = ja; + idxb0 = ib+j+0; + idxb1 = ib+j+1; + idxb2 = ib+j+2; + idxb3 = ib+j+3; + offsb = jb; + v00 = 0.0; + v01 = 0.0; + v02 = 0.0; + v03 = 0.0; + v10 = 0.0; + v11 = 0.0; + v12 = 0.0; + v13 = 0.0; + v20 = 0.0; + v21 = 0.0; + v22 = 0.0; + v23 = 0.0; + v30 = 0.0; + v31 = 0.0; + v32 = 0.0; + v33 = 0.0; + for(t=0; t<=k-1; t++) { - - /* - * Exit the loop if the growth factor is too small. - */ - if( ae_fp_less_eq(grow,smlnum) ) - { - break; - } - - /* - * G(j) = ( 1 + CNORM(j) )*G(j-1) - */ - xj = 1+cnorm->ptr.p_double[j]; - grow = grow/xj; - j = j+jinc; + a0 = a->ptr.pp_double[idxa0][offsa]; + a1 = a->ptr.pp_double[idxa1][offsa]; + b0 = b->ptr.pp_double[idxb0][offsb]; + b1 = b->ptr.pp_double[idxb1][offsb]; + v00 = v00+a0*b0; + v01 = v01+a0*b1; + v10 = v10+a1*b0; + v11 = v11+a1*b1; + a2 = a->ptr.pp_double[idxa2][offsa]; + a3 = a->ptr.pp_double[idxa3][offsa]; + v20 = v20+a2*b0; + v21 = v21+a2*b1; + v30 = v30+a3*b0; + v31 = v31+a3*b1; + b2 = b->ptr.pp_double[idxb2][offsb]; + b3 = b->ptr.pp_double[idxb3][offsb]; + v22 = v22+a2*b2; + v23 = v23+a2*b3; + v32 = v32+a3*b2; + v33 = v33+a3*b3; + v02 = v02+a0*b2; + v03 = v03+a0*b3; + v12 = v12+a1*b2; + v13 = v13+a1*b3; + offsa = offsa+1; + offsb = offsb+1; } - } - } - } - if( ae_fp_greater(grow*tscal,smlnum) ) - { - - /* - * Use the Level 2 BLAS solve if the reciprocal of the bound on - * elements of X is not too small. 
- */ - if( (upper&¬ran)||(!upper&&!notran) ) - { - if( nounit ) - { - vd = a->ptr.pp_double[n][n]; - } - else - { - vd = (double)(1); - } - x->ptr.p_double[n] = x->ptr.p_double[n]/vd; - for(i=n-1; i>=1; i--) - { - ip1 = i+1; - if( upper ) + if( ae_fp_eq(beta,(double)(0)) ) { - v = ae_v_dotproduct(&a->ptr.pp_double[i][ip1], 1, &x->ptr.p_double[ip1], 1, ae_v_len(ip1,n)); - } - else - { - v = ae_v_dotproduct(&a->ptr.pp_double[ip1][i], a->stride, &x->ptr.p_double[ip1], 1, ae_v_len(ip1,n)); - } - if( nounit ) - { - vd = a->ptr.pp_double[i][i]; + c->ptr.pp_double[ic+i+0][jc+j+0] = alpha*v00; + c->ptr.pp_double[ic+i+0][jc+j+1] = alpha*v01; + c->ptr.pp_double[ic+i+0][jc+j+2] = alpha*v02; + c->ptr.pp_double[ic+i+0][jc+j+3] = alpha*v03; + c->ptr.pp_double[ic+i+1][jc+j+0] = alpha*v10; + c->ptr.pp_double[ic+i+1][jc+j+1] = alpha*v11; + c->ptr.pp_double[ic+i+1][jc+j+2] = alpha*v12; + c->ptr.pp_double[ic+i+1][jc+j+3] = alpha*v13; + c->ptr.pp_double[ic+i+2][jc+j+0] = alpha*v20; + c->ptr.pp_double[ic+i+2][jc+j+1] = alpha*v21; + c->ptr.pp_double[ic+i+2][jc+j+2] = alpha*v22; + c->ptr.pp_double[ic+i+2][jc+j+3] = alpha*v23; + c->ptr.pp_double[ic+i+3][jc+j+0] = alpha*v30; + c->ptr.pp_double[ic+i+3][jc+j+1] = alpha*v31; + c->ptr.pp_double[ic+i+3][jc+j+2] = alpha*v32; + c->ptr.pp_double[ic+i+3][jc+j+3] = alpha*v33; } else { - vd = (double)(1); + c->ptr.pp_double[ic+i+0][jc+j+0] = beta*c->ptr.pp_double[ic+i+0][jc+j+0]+alpha*v00; + c->ptr.pp_double[ic+i+0][jc+j+1] = beta*c->ptr.pp_double[ic+i+0][jc+j+1]+alpha*v01; + c->ptr.pp_double[ic+i+0][jc+j+2] = beta*c->ptr.pp_double[ic+i+0][jc+j+2]+alpha*v02; + c->ptr.pp_double[ic+i+0][jc+j+3] = beta*c->ptr.pp_double[ic+i+0][jc+j+3]+alpha*v03; + c->ptr.pp_double[ic+i+1][jc+j+0] = beta*c->ptr.pp_double[ic+i+1][jc+j+0]+alpha*v10; + c->ptr.pp_double[ic+i+1][jc+j+1] = beta*c->ptr.pp_double[ic+i+1][jc+j+1]+alpha*v11; + c->ptr.pp_double[ic+i+1][jc+j+2] = beta*c->ptr.pp_double[ic+i+1][jc+j+2]+alpha*v12; + c->ptr.pp_double[ic+i+1][jc+j+3] = beta*c->ptr.pp_double[ic+i+1][jc+j+3]+alpha*v13; + c->ptr.pp_double[ic+i+2][jc+j+0] = beta*c->ptr.pp_double[ic+i+2][jc+j+0]+alpha*v20; + c->ptr.pp_double[ic+i+2][jc+j+1] = beta*c->ptr.pp_double[ic+i+2][jc+j+1]+alpha*v21; + c->ptr.pp_double[ic+i+2][jc+j+2] = beta*c->ptr.pp_double[ic+i+2][jc+j+2]+alpha*v22; + c->ptr.pp_double[ic+i+2][jc+j+3] = beta*c->ptr.pp_double[ic+i+2][jc+j+3]+alpha*v23; + c->ptr.pp_double[ic+i+3][jc+j+0] = beta*c->ptr.pp_double[ic+i+3][jc+j+0]+alpha*v30; + c->ptr.pp_double[ic+i+3][jc+j+1] = beta*c->ptr.pp_double[ic+i+3][jc+j+1]+alpha*v31; + c->ptr.pp_double[ic+i+3][jc+j+2] = beta*c->ptr.pp_double[ic+i+3][jc+j+2]+alpha*v32; + c->ptr.pp_double[ic+i+3][jc+j+3] = beta*c->ptr.pp_double[ic+i+3][jc+j+3]+alpha*v33; } - x->ptr.p_double[i] = (x->ptr.p_double[i]-v)/vd; - } - } - else - { - if( nounit ) - { - vd = a->ptr.pp_double[1][1]; } else { - vd = (double)(1); - } - x->ptr.p_double[1] = x->ptr.p_double[1]/vd; - for(i=2; i<=n; i++) - { - im1 = i-1; - if( upper ) - { - v = ae_v_dotproduct(&a->ptr.pp_double[1][i], a->stride, &x->ptr.p_double[1], 1, ae_v_len(1,im1)); - } - else - { - v = ae_v_dotproduct(&a->ptr.pp_double[i][1], 1, &x->ptr.p_double[1], 1, ae_v_len(1,im1)); - } - if( nounit ) - { - vd = a->ptr.pp_double[i][i]; - } - else - { - vd = (double)(1); - } - x->ptr.p_double[i] = (x->ptr.p_double[i]-v)/vd; - } - } - } - else - { - - /* - * Use a Level 1 BLAS solve, scaling intermediate results. 
- */ - if( ae_fp_greater(xmax,bignum) ) - { - - /* - * Scale X so that its components are less than or equal to - * BIGNUM in absolute value. - */ - *s = bignum/xmax; - ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), *s); - xmax = bignum; - } - if( notran ) - { - - /* - * Solve A * x = b - */ - j = jfirst; - while((jinc>0&&j<=jlast)||(jinc<0&&j>=jlast)) - { /* - * Compute x(j) = b(j) / A(j,j), scaling x if necessary. + * Determine submatrix [I0..I1]x[J0..J1] to process */ - xj = ae_fabs(x->ptr.p_double[j], _state); - flg = 0; - if( nounit ) - { - tjjs = a->ptr.pp_double[j][j]*tscal; - } - else - { - tjjs = tscal; - if( ae_fp_eq(tscal,(double)(1)) ) - { - flg = 100; - } - } - if( flg!=100 ) + i0 = i; + i1 = ae_minint(i+3, m-1, _state); + j0 = j; + j1 = ae_minint(j+3, n-1, _state); + + /* + * Process submatrix + */ + for(ik=i0; ik<=i1; ik++) { - tjj = ae_fabs(tjjs, _state); - if( ae_fp_greater(tjj,smlnum) ) + for(jk=j0; jk<=j1; jk++) { - - /* - * abs(A(j,j)) > SMLNUM: - */ - if( ae_fp_less(tjj,(double)(1)) ) + if( k==0||ae_fp_eq(alpha,(double)(0)) ) { - if( ae_fp_greater(xj,tjj*bignum) ) - { - - /* - * Scale x by 1/b(j). - */ - rec = 1/xj; - ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); - *s = *s*rec; - xmax = xmax*rec; - } + v = (double)(0); } - x->ptr.p_double[j] = x->ptr.p_double[j]/tjjs; - xj = ae_fabs(x->ptr.p_double[j], _state); - } - else - { - if( ae_fp_greater(tjj,(double)(0)) ) + else { - - /* - * 0 < abs(A(j,j)) <= SMLNUM: - */ - if( ae_fp_greater(xj,tjj*bignum) ) - { - - /* - * Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM - * to avoid overflow when dividing by A(j,j). - */ - rec = tjj*bignum/xj; - if( ae_fp_greater(cnorm->ptr.p_double[j],(double)(1)) ) - { - - /* - * Scale by 1/CNORM(j) to avoid overflow when - * multiplying x(j) times column j. - */ - rec = rec/cnorm->ptr.p_double[j]; - } - ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); - *s = *s*rec; - xmax = xmax*rec; - } - x->ptr.p_double[j] = x->ptr.p_double[j]/tjjs; - xj = ae_fabs(x->ptr.p_double[j], _state); + v = ae_v_dotproduct(&a->ptr.pp_double[ia+ik][ja], 1, &b->ptr.pp_double[ib+jk][jb], 1, ae_v_len(ja,ja+k-1)); } - else + if( ae_fp_eq(beta,(double)(0)) ) { - - /* - * A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and - * scale = 0, and compute a solution to A*x = 0. - */ - for(i=1; i<=n; i++) - { - x->ptr.p_double[i] = (double)(0); - } - x->ptr.p_double[j] = (double)(1); - xj = (double)(1); - *s = (double)(0); - xmax = (double)(0); + c->ptr.pp_double[ic+ik][jc+jk] = alpha*v; } - } - } - - /* - * Scale x if necessary to avoid overflow when adding a - * multiple of column j of A. - */ - if( ae_fp_greater(xj,(double)(1)) ) - { - rec = 1/xj; - if( ae_fp_greater(cnorm->ptr.p_double[j],(bignum-xmax)*rec) ) - { - - /* - * Scale x by 1/(2*abs(x(j))). - */ - rec = rec*0.5; - ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); - *s = *s*rec; - } - } - else - { - if( ae_fp_greater(xj*cnorm->ptr.p_double[j],bignum-xmax) ) - { - - /* - * Scale x by 1/2. 
- */ - ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), 0.5); - *s = *s*0.5; - } - } - if( upper ) - { - if( j>1 ) - { - - /* - * Compute the update - * x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) - */ - v = x->ptr.p_double[j]*tscal; - jm1 = j-1; - ae_v_subd(&x->ptr.p_double[1], 1, &a->ptr.pp_double[1][j], a->stride, ae_v_len(1,jm1), v); - i = 1; - for(k=2; k<=j-1; k++) + else { - if( ae_fp_greater(ae_fabs(x->ptr.p_double[k], _state),ae_fabs(x->ptr.p_double[i], _state)) ) - { - i = k; - } + c->ptr.pp_double[ic+ik][jc+jk] = beta*c->ptr.pp_double[ic+ik][jc+jk]+alpha*v; } - xmax = ae_fabs(x->ptr.p_double[i], _state); } } - else - { - if( jptr.p_double[j]*tscal; - ae_v_subd(&x->ptr.p_double[jp1], 1, &a->ptr.pp_double[jp1][j], a->stride, ae_v_len(jp1,n), v); - i = j+1; - for(k=j+2; k<=n; k++) - { - if( ae_fp_greater(ae_fabs(x->ptr.p_double[k], _state),ae_fabs(x->ptr.p_double[i], _state)) ) - { - i = k; - } - } - xmax = ae_fabs(x->ptr.p_double[i], _state); - } - } - j = j+jinc; } + j = j+4; } - else + i = i+4; + } +} + + +/************************************************************************* +RMatrixGEMM kernel, basecase code for RMatrixGEMM, specialized for sitation +with OpTypeA=1 and OpTypeB=0. + +Additional info: +* this function requires that Alpha<>0 (assertion is thrown otherwise) + +INPUT PARAMETERS + M - matrix size, M>0 + N - matrix size, N>0 + K - matrix size, K>0 + Alpha - coefficient + A - matrix + IA - submatrix offset + JA - submatrix offset + B - matrix + IB - submatrix offset + JB - submatrix offset + Beta - coefficient + C - PREALLOCATED output matrix + IC - submatrix offset + JC - submatrix offset + + -- ALGLIB routine -- + 27.03.2013 + Bochkanov Sergey +*************************************************************************/ +void rmatrixgemmk44v10(ae_int_t m, + ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state) +{ + ae_int_t i; + ae_int_t j; + double v; + double v00; + double v01; + double v02; + double v03; + double v10; + double v11; + double v12; + double v13; + double v20; + double v21; + double v22; + double v23; + double v30; + double v31; + double v32; + double v33; + double a0; + double a1; + double a2; + double a3; + double b0; + double b1; + double b2; + double b3; + ae_int_t idxa0; + ae_int_t idxa1; + ae_int_t idxa2; + ae_int_t idxa3; + ae_int_t idxb0; + ae_int_t idxb1; + ae_int_t idxb2; + ae_int_t idxb3; + ae_int_t i0; + ae_int_t i1; + ae_int_t ik; + ae_int_t j0; + ae_int_t j1; + ae_int_t jk; + ae_int_t t; + ae_int_t offsa; + ae_int_t offsb; + + + ae_assert(ae_fp_neq(alpha,(double)(0)), "RMatrixGEMMK44V00: internal error (Alpha=0)", _state); + + /* + * if matrix size is zero + */ + if( m==0||n==0 ) + { + return; + } + + /* + * A'*B + */ + i = 0; + while(i0&&j<=jlast)||(jinc<0&&j>=jlast)) + if( i+4<=m&&j+4<=n ) { /* - * Compute x(j) = b(j) - sum A(k,j)*x(k). - * k<>j + * Specialized 4x4 code for [I..I+3]x[J..J+3] submatrix of C. + * + * This submatrix is calculated as sum of K rank-1 products, + * with operands cached in local variables in order to speed + * up operations with arrays. 
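In the A'*B case, row i of op1(A) is column i of A, so at every step t the kernel reads one row of A and one row of B (the idxa0=ja+i / offsa=ia indexing that follows). A tiny sketch of the corresponding scalar path, plain C99 with illustrative names:

#include <stdio.h>

/* Illustrative scalar path of the A'*B kernel: C[i][j] = sum over t of
 * A[t][i]*B[t][j], i.e. each step t touches one row of A and one row of B.
 * Row-major layout. */
static double dot_cols_a(int k, const double *a, int lda, int i,
                                const double *b, int ldb, int j)
{
    double v = 0;
    for (int t = 0; t < k; t++)
        v += a[t*lda + i] * b[t*ldb + j];
    return v;
}

int main(void)
{
    double a[6] = {1, 4, 2, 5, 3, 6};   /* 3x2: columns (1,2,3) and (4,5,6) */
    double b[6] = {1, 1, 0, 2, 1, 0};   /* 3x2 */
    printf("%g\n", dot_cols_a(3, a, 2, 1, b, 2, 0));   /* (A'*B)[1][0] = 4*1+5*0+6*1 = 10 */
    return 0;
}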
*/ - xj = ae_fabs(x->ptr.p_double[j], _state); - uscal = tscal; - rec = 1/ae_maxreal(xmax, (double)(1), _state); - if( ae_fp_greater(cnorm->ptr.p_double[j],(bignum-xj)*rec) ) + idxa0 = ja+i+0; + idxa1 = ja+i+1; + idxa2 = ja+i+2; + idxa3 = ja+i+3; + offsa = ia; + idxb0 = jb+j+0; + idxb1 = jb+j+1; + idxb2 = jb+j+2; + idxb3 = jb+j+3; + offsb = ib; + v00 = 0.0; + v01 = 0.0; + v02 = 0.0; + v03 = 0.0; + v10 = 0.0; + v11 = 0.0; + v12 = 0.0; + v13 = 0.0; + v20 = 0.0; + v21 = 0.0; + v22 = 0.0; + v23 = 0.0; + v30 = 0.0; + v31 = 0.0; + v32 = 0.0; + v33 = 0.0; + for(t=0; t<=k-1; t++) { - - /* - * If x(j) could overflow, scale x by 1/(2*XMAX). - */ - rec = rec*0.5; - if( nounit ) - { - tjjs = a->ptr.pp_double[j][j]*tscal; - } - else - { - tjjs = tscal; - } - tjj = ae_fabs(tjjs, _state); - if( ae_fp_greater(tjj,(double)(1)) ) - { - - /* - * Divide by A(j,j) when scaling x if A(j,j) > 1. - */ - rec = ae_minreal((double)(1), rec*tjj, _state); - uscal = uscal/tjjs; - } - if( ae_fp_less(rec,(double)(1)) ) - { - ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); - *s = *s*rec; - xmax = xmax*rec; - } + a0 = a->ptr.pp_double[offsa][idxa0]; + a1 = a->ptr.pp_double[offsa][idxa1]; + b0 = b->ptr.pp_double[offsb][idxb0]; + b1 = b->ptr.pp_double[offsb][idxb1]; + v00 = v00+a0*b0; + v01 = v01+a0*b1; + v10 = v10+a1*b0; + v11 = v11+a1*b1; + a2 = a->ptr.pp_double[offsa][idxa2]; + a3 = a->ptr.pp_double[offsa][idxa3]; + v20 = v20+a2*b0; + v21 = v21+a2*b1; + v30 = v30+a3*b0; + v31 = v31+a3*b1; + b2 = b->ptr.pp_double[offsb][idxb2]; + b3 = b->ptr.pp_double[offsb][idxb3]; + v22 = v22+a2*b2; + v23 = v23+a2*b3; + v32 = v32+a3*b2; + v33 = v33+a3*b3; + v02 = v02+a0*b2; + v03 = v03+a0*b3; + v12 = v12+a1*b2; + v13 = v13+a1*b3; + offsa = offsa+1; + offsb = offsb+1; } - sumj = (double)(0); - if( ae_fp_eq(uscal,(double)(1)) ) + if( ae_fp_eq(beta,(double)(0)) ) { - - /* - * If the scaling needed for A in the dot product is 1, - * call DDOT to perform the dot product. - */ - if( upper ) - { - if( j>1 ) - { - jm1 = j-1; - sumj = ae_v_dotproduct(&a->ptr.pp_double[1][j], a->stride, &x->ptr.p_double[1], 1, ae_v_len(1,jm1)); - } - else - { - sumj = (double)(0); - } - } - else - { - if( jptr.pp_double[jp1][j], a->stride, &x->ptr.p_double[jp1], 1, ae_v_len(jp1,n)); - } - } + c->ptr.pp_double[ic+i+0][jc+j+0] = alpha*v00; + c->ptr.pp_double[ic+i+0][jc+j+1] = alpha*v01; + c->ptr.pp_double[ic+i+0][jc+j+2] = alpha*v02; + c->ptr.pp_double[ic+i+0][jc+j+3] = alpha*v03; + c->ptr.pp_double[ic+i+1][jc+j+0] = alpha*v10; + c->ptr.pp_double[ic+i+1][jc+j+1] = alpha*v11; + c->ptr.pp_double[ic+i+1][jc+j+2] = alpha*v12; + c->ptr.pp_double[ic+i+1][jc+j+3] = alpha*v13; + c->ptr.pp_double[ic+i+2][jc+j+0] = alpha*v20; + c->ptr.pp_double[ic+i+2][jc+j+1] = alpha*v21; + c->ptr.pp_double[ic+i+2][jc+j+2] = alpha*v22; + c->ptr.pp_double[ic+i+2][jc+j+3] = alpha*v23; + c->ptr.pp_double[ic+i+3][jc+j+0] = alpha*v30; + c->ptr.pp_double[ic+i+3][jc+j+1] = alpha*v31; + c->ptr.pp_double[ic+i+3][jc+j+2] = alpha*v32; + c->ptr.pp_double[ic+i+3][jc+j+3] = alpha*v33; } else { - - /* - * Otherwise, use in-line code for the dot product. 
- */ - if( upper ) - { - for(i=1; i<=j-1; i++) - { - v = a->ptr.pp_double[i][j]*uscal; - sumj = sumj+v*x->ptr.p_double[i]; - } - } - else - { - if( jptr.pp_double[i][j]*uscal; - sumj = sumj+v*x->ptr.p_double[i]; - } - } - } + c->ptr.pp_double[ic+i+0][jc+j+0] = beta*c->ptr.pp_double[ic+i+0][jc+j+0]+alpha*v00; + c->ptr.pp_double[ic+i+0][jc+j+1] = beta*c->ptr.pp_double[ic+i+0][jc+j+1]+alpha*v01; + c->ptr.pp_double[ic+i+0][jc+j+2] = beta*c->ptr.pp_double[ic+i+0][jc+j+2]+alpha*v02; + c->ptr.pp_double[ic+i+0][jc+j+3] = beta*c->ptr.pp_double[ic+i+0][jc+j+3]+alpha*v03; + c->ptr.pp_double[ic+i+1][jc+j+0] = beta*c->ptr.pp_double[ic+i+1][jc+j+0]+alpha*v10; + c->ptr.pp_double[ic+i+1][jc+j+1] = beta*c->ptr.pp_double[ic+i+1][jc+j+1]+alpha*v11; + c->ptr.pp_double[ic+i+1][jc+j+2] = beta*c->ptr.pp_double[ic+i+1][jc+j+2]+alpha*v12; + c->ptr.pp_double[ic+i+1][jc+j+3] = beta*c->ptr.pp_double[ic+i+1][jc+j+3]+alpha*v13; + c->ptr.pp_double[ic+i+2][jc+j+0] = beta*c->ptr.pp_double[ic+i+2][jc+j+0]+alpha*v20; + c->ptr.pp_double[ic+i+2][jc+j+1] = beta*c->ptr.pp_double[ic+i+2][jc+j+1]+alpha*v21; + c->ptr.pp_double[ic+i+2][jc+j+2] = beta*c->ptr.pp_double[ic+i+2][jc+j+2]+alpha*v22; + c->ptr.pp_double[ic+i+2][jc+j+3] = beta*c->ptr.pp_double[ic+i+2][jc+j+3]+alpha*v23; + c->ptr.pp_double[ic+i+3][jc+j+0] = beta*c->ptr.pp_double[ic+i+3][jc+j+0]+alpha*v30; + c->ptr.pp_double[ic+i+3][jc+j+1] = beta*c->ptr.pp_double[ic+i+3][jc+j+1]+alpha*v31; + c->ptr.pp_double[ic+i+3][jc+j+2] = beta*c->ptr.pp_double[ic+i+3][jc+j+2]+alpha*v32; + c->ptr.pp_double[ic+i+3][jc+j+3] = beta*c->ptr.pp_double[ic+i+3][jc+j+3]+alpha*v33; } - if( ae_fp_eq(uscal,tscal) ) + } + else + { + + /* + * Determine submatrix [I0..I1]x[J0..J1] to process + */ + i0 = i; + i1 = ae_minint(i+3, m-1, _state); + j0 = j; + j1 = ae_minint(j+3, n-1, _state); + + /* + * Process submatrix + */ + for(ik=i0; ik<=i1; ik++) { - - /* - * Compute x(j) := ( x(j) - sumj ) / A(j,j) if 1/A(j,j) - * was not used to scale the dotproduct. - */ - x->ptr.p_double[j] = x->ptr.p_double[j]-sumj; - xj = ae_fabs(x->ptr.p_double[j], _state); - flg = 0; - if( nounit ) - { - tjjs = a->ptr.pp_double[j][j]*tscal; - } - else + for(jk=j0; jk<=j1; jk++) { - tjjs = tscal; - if( ae_fp_eq(tscal,(double)(1)) ) + if( k==0||ae_fp_eq(alpha,(double)(0)) ) { - flg = 150; + v = (double)(0); } - } - - /* - * Compute x(j) = x(j) / A(j,j), scaling if necessary. - */ - if( flg!=150 ) - { - tjj = ae_fabs(tjjs, _state); - if( ae_fp_greater(tjj,smlnum) ) + else { - - /* - * abs(A(j,j)) > SMLNUM: - */ - if( ae_fp_less(tjj,(double)(1)) ) - { - if( ae_fp_greater(xj,tjj*bignum) ) - { - - /* - * Scale X by 1/abs(x(j)). - */ - rec = 1/xj; - ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); - *s = *s*rec; - xmax = xmax*rec; - } - } - x->ptr.p_double[j] = x->ptr.p_double[j]/tjjs; + v = 0.0; + v = ae_v_dotproduct(&a->ptr.pp_double[ia][ja+ik], a->stride, &b->ptr.pp_double[ib][jb+jk], b->stride, ae_v_len(ia,ia+k-1)); + } + if( ae_fp_eq(beta,(double)(0)) ) + { + c->ptr.pp_double[ic+ik][jc+jk] = alpha*v; } else { - if( ae_fp_greater(tjj,(double)(0)) ) - { - - /* - * 0 < abs(A(j,j)) <= SMLNUM: - */ - if( ae_fp_greater(xj,tjj*bignum) ) - { - - /* - * Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. - */ - rec = tjj*bignum/xj; - ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); - *s = *s*rec; - xmax = xmax*rec; - } - x->ptr.p_double[j] = x->ptr.p_double[j]/tjjs; - } - else - { - - /* - * A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and - * scale = 0, and compute a solution to A'*x = 0. 
- */ - for(i=1; i<=n; i++) - { - x->ptr.p_double[i] = (double)(0); - } - x->ptr.p_double[j] = (double)(1); - *s = (double)(0); - xmax = (double)(0); - } + c->ptr.pp_double[ic+ik][jc+jk] = beta*c->ptr.pp_double[ic+ik][jc+jk]+alpha*v; } } } - else - { - - /* - * Compute x(j) := x(j) / A(j,j) - sumj if the dot - * product has already been divided by 1/A(j,j). - */ - x->ptr.p_double[j] = x->ptr.p_double[j]/tjjs-sumj; - } - xmax = ae_maxreal(xmax, ae_fabs(x->ptr.p_double[j], _state), _state); - j = j+jinc; } + j = j+4; } - *s = *s/tscal; - } - - /* - * Scale the column norms by 1/TSCAL for return. - */ - if( ae_fp_neq(tscal,(double)(1)) ) - { - v = 1/tscal; - ae_v_muld(&cnorm->ptr.p_double[1], 1, ae_v_len(1,n), v); + i = i+4; } } +/************************************************************************* +RMatrixGEMM kernel, basecase code for RMatrixGEMM, specialized for sitation +with OpTypeA=1 and OpTypeB=1. +Additional info: +* this function requires that Alpha<>0 (assertion is thrown otherwise) -/************************************************************************* -Real implementation of CMatrixScaledTRSafeSolve +INPUT PARAMETERS + M - matrix size, M>0 + N - matrix size, N>0 + K - matrix size, K>0 + Alpha - coefficient + A - matrix + IA - submatrix offset + JA - submatrix offset + B - matrix + IB - submatrix offset + JB - submatrix offset + Beta - coefficient + C - PREALLOCATED output matrix + IC - submatrix offset + JC - submatrix offset -- ALGLIB routine -- - 21.01.2010 + 27.03.2013 Bochkanov Sergey *************************************************************************/ -ae_bool rmatrixscaledtrsafesolve(/* Real */ ae_matrix* a, - double sa, +void rmatrixgemmk44v11(ae_int_t m, ae_int_t n, - /* Real */ ae_vector* x, - ae_bool isupper, - ae_int_t trans, - ae_bool isunit, - double maxgrowth, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, ae_state *_state) { - ae_frame _frame_block; - double lnmax; - double nrmb; - double nrmx; ae_int_t i; - ae_complex alpha; - ae_complex beta; - double vr; - ae_complex cx; - ae_vector tmp; - ae_bool result; - - ae_frame_make(_state, &_frame_block); - ae_vector_init(&tmp, 0, DT_REAL, _state); - - ae_assert(n>0, "RMatrixTRSafeSolve: incorrect N!", _state); - ae_assert(trans==0||trans==1, "RMatrixTRSafeSolve: incorrect Trans!", _state); - result = ae_true; - lnmax = ae_log(ae_maxrealnumber, _state); - - /* - * Quick return if possible - */ - if( n<=0 ) - { - ae_frame_leave(_state); - return result; - } + ae_int_t j; + double v; + double v00; + double v01; + double v02; + double v03; + double v10; + double v11; + double v12; + double v13; + double v20; + double v21; + double v22; + double v23; + double v30; + double v31; + double v32; + double v33; + double a0; + double a1; + double a2; + double a3; + double b0; + double b1; + double b2; + double b3; + ae_int_t idxa0; + ae_int_t idxa1; + ae_int_t idxa2; + ae_int_t idxa3; + ae_int_t idxb0; + ae_int_t idxb1; + ae_int_t idxb2; + ae_int_t idxb3; + ae_int_t i0; + ae_int_t i1; + ae_int_t ik; + ae_int_t j0; + ae_int_t j1; + ae_int_t jk; + ae_int_t t; + ae_int_t offsa; + ae_int_t offsb; + + + ae_assert(ae_fp_neq(alpha,(double)(0)), "RMatrixGEMMK44V00: internal error (Alpha=0)", _state); /* - * Load norms: right part and X + * if matrix size is zero */ - nrmb = (double)(0); - for(i=0; i<=n-1; i++) + if( m==0||n==0 ) { - nrmb = ae_maxreal(nrmb, 
ae_fabs(x->ptr.p_double[i], _state), _state); + return; } - nrmx = (double)(0); /* - * Solve + * A'*B' */ - ae_vector_set_length(&tmp, n, _state); - result = ae_true; - if( isupper&&trans==0 ) + i = 0; + while(i=0; i--) + j = 0; + while(jptr.pp_double[i][i]*sa); - } - if( iptr.pp_double[i][i+1], 1, ae_v_len(i+1,n-1), sa); - vr = ae_v_dotproduct(&tmp.ptr.p_double[i+1], 1, &x->ptr.p_double[i+1], 1, ae_v_len(i+1,n-1)); - beta = ae_complex_from_d(x->ptr.p_double[i]-vr); + + /* + * Specialized 4x4 code for [I..I+3]x[J..J+3] submatrix of C. + * + * This submatrix is calculated as sum of K rank-1 products, + * with operands cached in local variables in order to speed + * up operations with arrays. + */ + idxa0 = ja+i+0; + idxa1 = ja+i+1; + idxa2 = ja+i+2; + idxa3 = ja+i+3; + offsa = ia; + idxb0 = ib+j+0; + idxb1 = ib+j+1; + idxb2 = ib+j+2; + idxb3 = ib+j+3; + offsb = jb; + v00 = 0.0; + v01 = 0.0; + v02 = 0.0; + v03 = 0.0; + v10 = 0.0; + v11 = 0.0; + v12 = 0.0; + v13 = 0.0; + v20 = 0.0; + v21 = 0.0; + v22 = 0.0; + v23 = 0.0; + v30 = 0.0; + v31 = 0.0; + v32 = 0.0; + v33 = 0.0; + for(t=0; t<=k-1; t++) + { + a0 = a->ptr.pp_double[offsa][idxa0]; + a1 = a->ptr.pp_double[offsa][idxa1]; + b0 = b->ptr.pp_double[idxb0][offsb]; + b1 = b->ptr.pp_double[idxb1][offsb]; + v00 = v00+a0*b0; + v01 = v01+a0*b1; + v10 = v10+a1*b0; + v11 = v11+a1*b1; + a2 = a->ptr.pp_double[offsa][idxa2]; + a3 = a->ptr.pp_double[offsa][idxa3]; + v20 = v20+a2*b0; + v21 = v21+a2*b1; + v30 = v30+a3*b0; + v31 = v31+a3*b1; + b2 = b->ptr.pp_double[idxb2][offsb]; + b3 = b->ptr.pp_double[idxb3][offsb]; + v22 = v22+a2*b2; + v23 = v23+a2*b3; + v32 = v32+a3*b2; + v33 = v33+a3*b3; + v02 = v02+a0*b2; + v03 = v03+a0*b3; + v12 = v12+a1*b2; + v13 = v13+a1*b3; + offsa = offsa+1; + offsb = offsb+1; + } + if( ae_fp_eq(beta,(double)(0)) ) + { + c->ptr.pp_double[ic+i+0][jc+j+0] = alpha*v00; + c->ptr.pp_double[ic+i+0][jc+j+1] = alpha*v01; + c->ptr.pp_double[ic+i+0][jc+j+2] = alpha*v02; + c->ptr.pp_double[ic+i+0][jc+j+3] = alpha*v03; + c->ptr.pp_double[ic+i+1][jc+j+0] = alpha*v10; + c->ptr.pp_double[ic+i+1][jc+j+1] = alpha*v11; + c->ptr.pp_double[ic+i+1][jc+j+2] = alpha*v12; + c->ptr.pp_double[ic+i+1][jc+j+3] = alpha*v13; + c->ptr.pp_double[ic+i+2][jc+j+0] = alpha*v20; + c->ptr.pp_double[ic+i+2][jc+j+1] = alpha*v21; + c->ptr.pp_double[ic+i+2][jc+j+2] = alpha*v22; + c->ptr.pp_double[ic+i+2][jc+j+3] = alpha*v23; + c->ptr.pp_double[ic+i+3][jc+j+0] = alpha*v30; + c->ptr.pp_double[ic+i+3][jc+j+1] = alpha*v31; + c->ptr.pp_double[ic+i+3][jc+j+2] = alpha*v32; + c->ptr.pp_double[ic+i+3][jc+j+3] = alpha*v33; + } + else + { + c->ptr.pp_double[ic+i+0][jc+j+0] = beta*c->ptr.pp_double[ic+i+0][jc+j+0]+alpha*v00; + c->ptr.pp_double[ic+i+0][jc+j+1] = beta*c->ptr.pp_double[ic+i+0][jc+j+1]+alpha*v01; + c->ptr.pp_double[ic+i+0][jc+j+2] = beta*c->ptr.pp_double[ic+i+0][jc+j+2]+alpha*v02; + c->ptr.pp_double[ic+i+0][jc+j+3] = beta*c->ptr.pp_double[ic+i+0][jc+j+3]+alpha*v03; + c->ptr.pp_double[ic+i+1][jc+j+0] = beta*c->ptr.pp_double[ic+i+1][jc+j+0]+alpha*v10; + c->ptr.pp_double[ic+i+1][jc+j+1] = beta*c->ptr.pp_double[ic+i+1][jc+j+1]+alpha*v11; + c->ptr.pp_double[ic+i+1][jc+j+2] = beta*c->ptr.pp_double[ic+i+1][jc+j+2]+alpha*v12; + c->ptr.pp_double[ic+i+1][jc+j+3] = beta*c->ptr.pp_double[ic+i+1][jc+j+3]+alpha*v13; + c->ptr.pp_double[ic+i+2][jc+j+0] = beta*c->ptr.pp_double[ic+i+2][jc+j+0]+alpha*v20; + c->ptr.pp_double[ic+i+2][jc+j+1] = beta*c->ptr.pp_double[ic+i+2][jc+j+1]+alpha*v21; + c->ptr.pp_double[ic+i+2][jc+j+2] = beta*c->ptr.pp_double[ic+i+2][jc+j+2]+alpha*v22; + 
c->ptr.pp_double[ic+i+2][jc+j+3] = beta*c->ptr.pp_double[ic+i+2][jc+j+3]+alpha*v23; + c->ptr.pp_double[ic+i+3][jc+j+0] = beta*c->ptr.pp_double[ic+i+3][jc+j+0]+alpha*v30; + c->ptr.pp_double[ic+i+3][jc+j+1] = beta*c->ptr.pp_double[ic+i+3][jc+j+1]+alpha*v31; + c->ptr.pp_double[ic+i+3][jc+j+2] = beta*c->ptr.pp_double[ic+i+3][jc+j+2]+alpha*v32; + c->ptr.pp_double[ic+i+3][jc+j+3] = beta*c->ptr.pp_double[ic+i+3][jc+j+3]+alpha*v33; + } } else { - beta = ae_complex_from_d(x->ptr.p_double[i]); - } - - /* - * solve alpha*x[i] = beta - */ - result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &cx, _state); - if( !result ) - { - ae_frame_leave(_state); - return result; + + /* + * Determine submatrix [I0..I1]x[J0..J1] to process + */ + i0 = i; + i1 = ae_minint(i+3, m-1, _state); + j0 = j; + j1 = ae_minint(j+3, n-1, _state); + + /* + * Process submatrix + */ + for(ik=i0; ik<=i1; ik++) + { + for(jk=j0; jk<=j1; jk++) + { + if( k==0||ae_fp_eq(alpha,(double)(0)) ) + { + v = (double)(0); + } + else + { + v = 0.0; + v = ae_v_dotproduct(&a->ptr.pp_double[ia][ja+ik], a->stride, &b->ptr.pp_double[ib+jk][jb], 1, ae_v_len(ia,ia+k-1)); + } + if( ae_fp_eq(beta,(double)(0)) ) + { + c->ptr.pp_double[ic+ik][jc+jk] = alpha*v; + } + else + { + c->ptr.pp_double[ic+ik][jc+jk] = beta*c->ptr.pp_double[ic+ik][jc+jk]+alpha*v; + } + } + } } - x->ptr.p_double[i] = cx.x; + j = j+4; } - ae_frame_leave(_state); - return result; + i = i+4; } - if( !isupper&&trans==0 ) - { - - /* - * L*x = b - */ - for(i=0; i<=n-1; i++) - { - - /* - * Task is reduced to alpha*x[i] = beta - */ - if( isunit ) - { - alpha = ae_complex_from_d(sa); - } - else - { - alpha = ae_complex_from_d(a->ptr.pp_double[i][i]*sa); - } - if( i>0 ) - { - ae_v_moved(&tmp.ptr.p_double[0], 1, &a->ptr.pp_double[i][0], 1, ae_v_len(0,i-1), sa); - vr = ae_v_dotproduct(&tmp.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,i-1)); - beta = ae_complex_from_d(x->ptr.p_double[i]-vr); - } - else - { - beta = ae_complex_from_d(x->ptr.p_double[i]); - } - - /* - * solve alpha*x[i] = beta - */ - result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &cx, _state); - if( !result ) - { - ae_frame_leave(_state); - return result; - } - x->ptr.p_double[i] = cx.x; - } - ae_frame_leave(_state); - return result; - } - if( isupper&&trans==1 ) - { - - /* - * U^T*x = b - */ - for(i=0; i<=n-1; i++) - { - - /* - * Task is reduced to alpha*x[i] = beta - */ - if( isunit ) - { - alpha = ae_complex_from_d(sa); - } - else - { - alpha = ae_complex_from_d(a->ptr.pp_double[i][i]*sa); - } - beta = ae_complex_from_d(x->ptr.p_double[i]); - - /* - * solve alpha*x[i] = beta - */ - result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &cx, _state); - if( !result ) - { - ae_frame_leave(_state); - return result; - } - x->ptr.p_double[i] = cx.x; - - /* - * update the rest of right part - */ - if( iptr.pp_double[i][i+1], 1, ae_v_len(i+1,n-1), sa); - ae_v_subd(&x->ptr.p_double[i+1], 1, &tmp.ptr.p_double[i+1], 1, ae_v_len(i+1,n-1), vr); - } - } - ae_frame_leave(_state); - return result; - } - if( !isupper&&trans==1 ) - { - - /* - * L^T*x = b - */ - for(i=n-1; i>=0; i--) - { - - /* - * Task is reduced to alpha*x[i] = beta - */ - if( isunit ) - { - alpha = ae_complex_from_d(sa); - } - else - { - alpha = ae_complex_from_d(a->ptr.pp_double[i][i]*sa); - } - beta = ae_complex_from_d(x->ptr.p_double[i]); - - /* - * solve alpha*x[i] = beta - */ - result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &cx, 
_state); - if( !result ) - { - ae_frame_leave(_state); - return result; - } - x->ptr.p_double[i] = cx.x; - - /* - * update the rest of right part - */ - if( i>0 ) - { - vr = cx.x; - ae_v_moved(&tmp.ptr.p_double[0], 1, &a->ptr.pp_double[i][0], 1, ae_v_len(0,i-1), sa); - ae_v_subd(&x->ptr.p_double[0], 1, &tmp.ptr.p_double[0], 1, ae_v_len(0,i-1), vr); - } - } - ae_frame_leave(_state); - return result; - } - result = ae_false; - ae_frame_leave(_state); - return result; } +#endif +#if defined(AE_COMPILE_CREFLECTIONS) || !defined(AE_PARTIAL_BUILD) + + /************************************************************************* -Internal subroutine for safe solution of +Generation of an elementary complex reflection transformation - SA*op(A)=b - -where A is NxN upper/lower triangular/unitriangular matrix, op(A) is -either identity transform, transposition or Hermitian transposition, SA is -a scaling factor such that max(|SA*A[i,j]|) is close to 1.0 in magnutude. +The subroutine generates elementary complex reflection H of order N, so +that, for a given X, the following equality holds true: -This subroutine limits relative growth of solution (in inf-norm) by -MaxGrowth, returning False if growth exceeds MaxGrowth. Degenerate or -near-degenerate matrices are handled correctly (False is returned) as long -as MaxGrowth is significantly less than MaxRealNumber/norm(b). + ( X(1) ) ( Beta ) +H' * ( .. ) = ( 0 ), H'*H = I, Beta is a real number + ( X(n) ) ( 0 ) - -- ALGLIB routine -- - 21.01.2010 - Bochkanov Sergey +where + + ( V(1) ) +H = 1 - Tau * ( .. ) * ( conj(V(1)), ..., conj(V(n)) ) + ( V(n) ) + +where the first component of vector V equals 1. + +Input parameters: + X - vector. Array with elements [1..N]. + N - reflection order. + +Output parameters: + X - components from 2 to N are replaced by vector V. + The first component is replaced with parameter Beta. + Tau - scalar value Tau. + +This subroutine is the modification of CLARFG subroutines from the LAPACK +library. It has similar functionality except for the fact that it doesn't +handle errors when intermediate results cause an overflow. + + -- LAPACK auxiliary routine (version 3.0) -- + Univ. of Tennessee, Univ. 
of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + September 30, 1994 *************************************************************************/ -ae_bool cmatrixscaledtrsafesolve(/* Complex */ ae_matrix* a, - double sa, +void complexgeneratereflection(/* Complex */ ae_vector* x, ae_int_t n, - /* Complex */ ae_vector* x, - ae_bool isupper, - ae_int_t trans, - ae_bool isunit, - double maxgrowth, + ae_complex* tau, ae_state *_state) { - ae_frame _frame_block; - double lnmax; - double nrmb; - double nrmx; - ae_int_t i; + ae_int_t j; ae_complex alpha; - ae_complex beta; - ae_complex vc; - ae_vector tmp; - ae_bool result; + double alphi; + double alphr; + double beta; + double xnorm; + double mx; + ae_complex t; + double s; + ae_complex v; - ae_frame_make(_state, &_frame_block); - ae_vector_init(&tmp, 0, DT_COMPLEX, _state); + tau->x = 0; + tau->y = 0; - ae_assert(n>0, "CMatrixTRSafeSolve: incorrect N!", _state); - ae_assert((trans==0||trans==1)||trans==2, "CMatrixTRSafeSolve: incorrect Trans!", _state); - result = ae_true; - lnmax = ae_log(ae_maxrealnumber, _state); - - /* - * Quick return if possible - */ if( n<=0 ) { - ae_frame_leave(_state); - return result; + *tau = ae_complex_from_i(0); + return; } /* - * Load norms: right part and X + * Scale if needed (to avoid overflow/underflow during intermediate + * calculations). */ - nrmb = (double)(0); - for(i=0; i<=n-1; i++) + mx = (double)(0); + for(j=1; j<=n; j++) { - nrmb = ae_maxreal(nrmb, ae_c_abs(x->ptr.p_complex[i], _state), _state); + mx = ae_maxreal(ae_c_abs(x->ptr.p_complex[j], _state), mx, _state); + } + s = (double)(1); + if( ae_fp_neq(mx,(double)(0)) ) + { + if( ae_fp_less(mx,(double)(1)) ) + { + s = ae_sqrt(ae_minrealnumber, _state); + v = ae_complex_from_d(1/s); + ae_v_cmulc(&x->ptr.p_complex[1], 1, ae_v_len(1,n), v); + } + else + { + s = ae_sqrt(ae_maxrealnumber, _state); + v = ae_complex_from_d(1/s); + ae_v_cmulc(&x->ptr.p_complex[1], 1, ae_v_len(1,n), v); + } } - nrmx = (double)(0); /* - * Solve + * calculate */ - ae_vector_set_length(&tmp, n, _state); - result = ae_true; - if( isupper&&trans==0 ) + alpha = x->ptr.p_complex[1]; + mx = (double)(0); + for(j=2; j<=n; j++) { - - /* - * U*x = b - */ - for(i=n-1; i>=0; i--) + mx = ae_maxreal(ae_c_abs(x->ptr.p_complex[j], _state), mx, _state); + } + xnorm = (double)(0); + if( ae_fp_neq(mx,(double)(0)) ) + { + for(j=2; j<=n; j++) { - - /* - * Task is reduced to alpha*x[i] = beta - */ - if( isunit ) - { - alpha = ae_complex_from_d(sa); - } - else - { - alpha = ae_c_mul_d(a->ptr.pp_complex[i][i],sa); - } - if( iptr.pp_complex[i][i+1], 1, "N", ae_v_len(i+1,n-1), sa); - vc = ae_v_cdotproduct(&tmp.ptr.p_complex[i+1], 1, "N", &x->ptr.p_complex[i+1], 1, "N", ae_v_len(i+1,n-1)); - beta = ae_c_sub(x->ptr.p_complex[i],vc); - } - else - { - beta = x->ptr.p_complex[i]; - } - - /* - * solve alpha*x[i] = beta - */ - result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); - if( !result ) - { - ae_frame_leave(_state); - return result; - } - x->ptr.p_complex[i] = vc; + t = ae_c_div_d(x->ptr.p_complex[j],mx); + xnorm = xnorm+ae_c_mul(t,ae_c_conj(t, _state)).x; } - ae_frame_leave(_state); - return result; + xnorm = ae_sqrt(xnorm, _state)*mx; } - if( !isupper&&trans==0 ) + alphr = alpha.x; + alphi = alpha.y; + if( ae_fp_eq(xnorm,(double)(0))&&ae_fp_eq(alphi,(double)(0)) ) { - - /* - * L*x = b - */ - for(i=0; i<=n-1; i++) - { - - /* - * Task is reduced to alpha*x[i] = beta - */ - if( isunit ) - { - alpha = 
ae_complex_from_d(sa); - } - else - { - alpha = ae_c_mul_d(a->ptr.pp_complex[i][i],sa); - } - if( i>0 ) - { - ae_v_cmoved(&tmp.ptr.p_complex[0], 1, &a->ptr.pp_complex[i][0], 1, "N", ae_v_len(0,i-1), sa); - vc = ae_v_cdotproduct(&tmp.ptr.p_complex[0], 1, "N", &x->ptr.p_complex[0], 1, "N", ae_v_len(0,i-1)); - beta = ae_c_sub(x->ptr.p_complex[i],vc); - } - else - { - beta = x->ptr.p_complex[i]; - } - - /* - * solve alpha*x[i] = beta - */ - result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); - if( !result ) - { - ae_frame_leave(_state); - return result; - } - x->ptr.p_complex[i] = vc; - } - ae_frame_leave(_state); - return result; + *tau = ae_complex_from_i(0); + x->ptr.p_complex[1] = ae_c_mul_d(x->ptr.p_complex[1],s); + return; } - if( isupper&&trans==1 ) + mx = ae_maxreal(ae_fabs(alphr, _state), ae_fabs(alphi, _state), _state); + mx = ae_maxreal(mx, ae_fabs(xnorm, _state), _state); + beta = -mx*ae_sqrt(ae_sqr(alphr/mx, _state)+ae_sqr(alphi/mx, _state)+ae_sqr(xnorm/mx, _state), _state); + if( ae_fp_less(alphr,(double)(0)) ) { - - /* - * U^T*x = b - */ - for(i=0; i<=n-1; i++) - { - - /* - * Task is reduced to alpha*x[i] = beta - */ - if( isunit ) - { - alpha = ae_complex_from_d(sa); - } - else - { - alpha = ae_c_mul_d(a->ptr.pp_complex[i][i],sa); - } - beta = x->ptr.p_complex[i]; - - /* - * solve alpha*x[i] = beta - */ - result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); - if( !result ) - { - ae_frame_leave(_state); - return result; - } - x->ptr.p_complex[i] = vc; - - /* - * update the rest of right part - */ - if( iptr.pp_complex[i][i+1], 1, "N", ae_v_len(i+1,n-1), sa); - ae_v_csubc(&x->ptr.p_complex[i+1], 1, &tmp.ptr.p_complex[i+1], 1, "N", ae_v_len(i+1,n-1), vc); - } - } - ae_frame_leave(_state); - return result; + beta = -beta; } - if( !isupper&&trans==1 ) + tau->x = (beta-alphr)/beta; + tau->y = -alphi/beta; + alpha = ae_c_d_div(1,ae_c_sub_d(alpha,beta)); + if( n>1 ) { - - /* - * L^T*x = b - */ - for(i=n-1; i>=0; i--) - { - - /* - * Task is reduced to alpha*x[i] = beta - */ - if( isunit ) - { - alpha = ae_complex_from_d(sa); - } - else - { - alpha = ae_c_mul_d(a->ptr.pp_complex[i][i],sa); - } - beta = x->ptr.p_complex[i]; - - /* - * solve alpha*x[i] = beta - */ - result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); - if( !result ) - { - ae_frame_leave(_state); - return result; - } - x->ptr.p_complex[i] = vc; - - /* - * update the rest of right part - */ - if( i>0 ) - { - ae_v_cmoved(&tmp.ptr.p_complex[0], 1, &a->ptr.pp_complex[i][0], 1, "N", ae_v_len(0,i-1), sa); - ae_v_csubc(&x->ptr.p_complex[0], 1, &tmp.ptr.p_complex[0], 1, "N", ae_v_len(0,i-1), vc); - } - } - ae_frame_leave(_state); - return result; + ae_v_cmulc(&x->ptr.p_complex[2], 1, ae_v_len(2,n), alpha); } - if( isupper&&trans==2 ) + alpha = ae_complex_from_d(beta); + x->ptr.p_complex[1] = alpha; + + /* + * Scale back + */ + x->ptr.p_complex[1] = ae_c_mul_d(x->ptr.p_complex[1],s); +} + + +/************************************************************************* +Application of an elementary reflection to a rectangular matrix of size MxN + +The algorithm pre-multiplies the matrix by an elementary reflection +transformation which is given by column V and scalar Tau (see the +description of the GenerateReflection). Not the whole matrix but only a +part of it is transformed (rows from M1 to M2, columns from N1 to N2). Only +the elements of this submatrix are changed. 
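As a quick illustration of the generator above (which follows the LAPACK CLARFG recipe: pick Beta with the sign that avoids cancellation, store V with V(1)=1 back into X, return Tau), here is a minimal real-valued sketch in plain C. It uses ordinary arrays instead of the library's ae_* containers, and the helper name, the fixed length 3 and the sample vector are invented for the example.

#include <math.h>
#include <stdio.h>

/* Real-valued sketch: on exit x[0] holds beta and x[1..n-1] holds v[1..n-1]
   (v[0] = 1 is implied); the return value is tau, so that
   (I - tau*v*v^T) * x_original = (beta, 0, ..., 0)^T. */
static double generate_reflection(double *x, int n)
{
    double alpha = x[0], xnorm = 0.0, beta, scale;
    int j;
    for(j = 1; j < n; j++)
        xnorm += x[j] * x[j];
    xnorm = sqrt(xnorm);
    if( xnorm == 0.0 )
        return 0.0;                      /* x is already of the form (alpha, 0, ..., 0) */
    beta = -copysign(sqrt(alpha * alpha + xnorm * xnorm), alpha);
    scale = 1.0 / (alpha - beta);        /* the sign choice keeps alpha-beta away from zero */
    for(j = 1; j < n; j++)
        x[j] *= scale;
    x[0] = beta;
    return (beta - alpha) / beta;
}

int main(void)
{
    double x[3] = { 3.0, 4.0, 0.0 };
    double tau = generate_reflection(x, 3);
    /* expected: beta = -5, tau = 1.6, v = (1, 0.5, 0) */
    printf("beta=%g tau=%g v=(1, %g, %g)\n", x[0], tau, x[1], x[2]);
    return 0;
}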
+ +Note: the matrix is multiplied by H, not by H'. If it is required to +multiply the matrix by H', it is necessary to pass Conj(Tau) instead of Tau. + +Input parameters: + C - matrix to be transformed. + Tau - scalar defining transformation. + V - column defining transformation. + Array whose index ranges within [1..M2-M1+1] + M1, M2 - range of rows to be transformed. + N1, N2 - range of columns to be transformed. + WORK - working array whose index goes from N1 to N2. + +Output parameters: + C - the result of multiplying the input matrix C by the + transformation matrix which is given by Tau and V. + If N1>N2 or M1>M2, C is not modified. + + -- LAPACK auxiliary routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + September 30, 1994 +*************************************************************************/ +void complexapplyreflectionfromtheleft(/* Complex */ ae_matrix* c, + ae_complex tau, + /* Complex */ ae_vector* v, + ae_int_t m1, + ae_int_t m2, + ae_int_t n1, + ae_int_t n2, + /* Complex */ ae_vector* work, + ae_state *_state) +{ + ae_complex t; + ae_int_t i; + + + if( (ae_c_eq_d(tau,(double)(0))||n1>n2)||m1>m2 ) { - - /* - * U^H*x = b - */ - for(i=0; i<=n-1; i++) - { - - /* - * Task is reduced to alpha*x[i] = beta - */ - if( isunit ) - { - alpha = ae_complex_from_d(sa); - } - else - { - alpha = ae_c_mul_d(ae_c_conj(a->ptr.pp_complex[i][i], _state),sa); - } - beta = x->ptr.p_complex[i]; - - /* - * solve alpha*x[i] = beta - */ - result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); - if( !result ) - { - ae_frame_leave(_state); - return result; - } - x->ptr.p_complex[i] = vc; - - /* - * update the rest of right part - */ - if( iptr.pp_complex[i][i+1], 1, "Conj", ae_v_len(i+1,n-1), sa); - ae_v_csubc(&x->ptr.p_complex[i+1], 1, &tmp.ptr.p_complex[i+1], 1, "N", ae_v_len(i+1,n-1), vc); - } - } - ae_frame_leave(_state); - return result; + return; } - if( !isupper&&trans==2 ) + + /* + * w := C^T * conj(v) + */ + for(i=n1; i<=n2; i++) { - - /* - * L^T*x = b - */ - for(i=n-1; i>=0; i--) - { - - /* - * Task is reduced to alpha*x[i] = beta - */ - if( isunit ) - { - alpha = ae_complex_from_d(sa); - } - else - { - alpha = ae_c_mul_d(ae_c_conj(a->ptr.pp_complex[i][i], _state),sa); - } - beta = x->ptr.p_complex[i]; - - /* - * solve alpha*x[i] = beta - */ - result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); - if( !result ) - { - ae_frame_leave(_state); - return result; - } - x->ptr.p_complex[i] = vc; - - /* - * update the rest of right part - */ - if( i>0 ) - { - ae_v_cmoved(&tmp.ptr.p_complex[0], 1, &a->ptr.pp_complex[i][0], 1, "Conj", ae_v_len(0,i-1), sa); - ae_v_csubc(&x->ptr.p_complex[0], 1, &tmp.ptr.p_complex[0], 1, "N", ae_v_len(0,i-1), vc); - } - } - ae_frame_leave(_state); - return result; + work->ptr.p_complex[i] = ae_complex_from_i(0); + } + for(i=m1; i<=m2; i++) + { + t = ae_c_conj(v->ptr.p_complex[i+1-m1], _state); + ae_v_caddc(&work->ptr.p_complex[n1], 1, &c->ptr.pp_complex[i][n1], 1, "N", ae_v_len(n1,n2), t); + } + + /* + * C := C - tau * v * w^T + */ + for(i=m1; i<=m2; i++) + { + t = ae_c_mul(v->ptr.p_complex[i-m1+1],tau); + ae_v_csubc(&c->ptr.pp_complex[i][n1], 1, &work->ptr.p_complex[n1], 1, "N", ae_v_len(n1,n2), t); } - result = ae_false; - ae_frame_leave(_state); - return result; } /************************************************************************* -complex basic solver-updater for reduced 
linear system +Application of an elementary reflection to a rectangular matrix of size MxN - alpha*x[i] = beta +The algorithm post-multiplies the matrix by an elementary reflection +transformation which is given by column V and scalar Tau (see the +description of the GenerateReflection). Not the whole matrix but only a +part of it is transformed (rows from M1 to M2, columns from N1 to N2). +Only the elements of this submatrix are changed. -solves this equation and updates it in overlfow-safe manner (keeping track -of relative growth of solution). +Input parameters: + C - matrix to be transformed. + Tau - scalar defining transformation. + V - column defining transformation. + Array whose index ranges within [1..N2-N1+1] + M1, M2 - range of rows to be transformed. + N1, N2 - range of columns to be transformed. + WORK - working array whose index goes from M1 to M2. -Parameters: - Alpha - alpha - Beta - beta - LnMax - precomputed Ln(MaxRealNumber) - BNorm - inf-norm of b (right part of original system) - MaxGrowth- maximum growth of norm(x) relative to norm(b) - XNorm - inf-norm of other components of X (which are already processed) - it is updated by CBasicSolveAndUpdate. - X - solution +Output parameters: + C - the result of multiplying the input matrix C by the + transformation matrix which is given by Tau and V. + If N1>N2 or M1>M2, C is not modified. - -- ALGLIB routine -- - 26.01.2009 - Bochkanov Sergey + -- LAPACK auxiliary routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + September 30, 1994 *************************************************************************/ -static ae_bool safesolve_cbasicsolveandupdate(ae_complex alpha, - ae_complex beta, - double lnmax, - double bnorm, - double maxgrowth, - double* xnorm, - ae_complex* x, +void complexapplyreflectionfromtheright(/* Complex */ ae_matrix* c, + ae_complex tau, + /* Complex */ ae_vector* v, + ae_int_t m1, + ae_int_t m2, + ae_int_t n1, + ae_int_t n2, + /* Complex */ ae_vector* work, ae_state *_state) { - double v; - ae_bool result; + ae_complex t; + ae_int_t i; + ae_int_t vm; - x->x = 0; - x->y = 0; - result = ae_false; - if( ae_c_eq_d(alpha,(double)(0)) ) - { - return result; - } - if( ae_c_neq_d(beta,(double)(0)) ) + if( (ae_c_eq_d(tau,(double)(0))||n1>n2)||m1>m2 ) { - - /* - * alpha*x[i]=beta - */ - v = ae_log(ae_c_abs(beta, _state), _state)-ae_log(ae_c_abs(alpha, _state), _state); - if( ae_fp_greater(v,lnmax) ) - { - return result; - } - *x = ae_c_div(beta,alpha); + return; } - else + + /* + * w := C * v + */ + vm = n2-n1+1; + for(i=m1; i<=m2; i++) { - - /* - * alpha*x[i]=0 - */ - *x = ae_complex_from_i(0); + t = ae_v_cdotproduct(&c->ptr.pp_complex[i][n1], 1, "N", &v->ptr.p_complex[1], 1, "N", ae_v_len(n1,n2)); + work->ptr.p_complex[i] = t; } /* - * update NrmX, test growth limit + * C := C - w * conj(v^T) */ - *xnorm = ae_maxreal(*xnorm, ae_c_abs(*x, _state), _state); - if( ae_fp_greater(*xnorm,maxgrowth*bnorm) ) + ae_v_cmove(&v->ptr.p_complex[1], 1, &v->ptr.p_complex[1], 1, "Conj", ae_v_len(1,vm)); + for(i=m1; i<=m2; i++) { - return result; + t = ae_c_mul(work->ptr.p_complex[i],tau); + ae_v_csubc(&c->ptr.pp_complex[i][n1], 1, &v->ptr.p_complex[1], 1, "N", ae_v_len(n1,n2), t); } - result = ae_true; - return result; + ae_v_cmove(&v->ptr.p_complex[1], 1, &v->ptr.p_complex[1], 1, "Conj", ae_v_len(1,vm)); } +#endif +#if defined(AE_COMPILE_ROTATIONS) || !defined(AE_PARTIAL_BUILD) 
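Both application routines above share the same two-pass structure: accumulate w (w := C^T * conj(v) for the left-hand version, w := C * v for the right-hand one), then subtract the rank-one update built from Tau, V and w. A compact real-valued sketch of the left-hand case on plain arrays follows; the 3x3 data, v and tau are hard-coded to the values produced by the generation sketch earlier, and all names and sizes here are illustrative rather than part of the library API.

#include <stdio.h>

#define N 3

/* Pre-multiply the N x N matrix c by H = I - tau*v*v^T:
   pass 1: w = v^T * C, pass 2: C = C - tau * v * w^T. */
static void apply_reflection_left(double c[N][N], const double v[N], double tau)
{
    double w[N];
    int i, j;
    for(j = 0; j < N; j++)
    {
        w[j] = 0.0;
        for(i = 0; i < N; i++)
            w[j] += v[i] * c[i][j];
    }
    for(i = 0; i < N; i++)
        for(j = 0; j < N; j++)
            c[i][j] -= tau * v[i] * w[j];
}

int main(void)
{
    /* the first column of c is (3,4,0); this v/tau pair reflects it onto (-5,0,0) */
    double c[N][N] = { { 3.0, 1.0, 0.0 }, { 4.0, 0.0, 1.0 }, { 0.0, 2.0, 2.0 } };
    double v[N]    = { 1.0, 0.5, 0.0 };
    double tau     = 1.6;
    apply_reflection_left(c, v, tau);
    printf("first column: %g %g %g\n", c[0][0], c[1][0], c[2][0]);
    return 0;
}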
/************************************************************************* -Prepares HPC compuations of chunked gradient with HPCChunkedGradient(). -You have to call this function before calling HPCChunkedGradient() for -a new set of weights. You have to call it only once, see example below: +Application of a sequence of elementary rotations to a matrix -HOW TO PROCESS DATASET WITH THIS FUNCTION: - Grad:=0 - HPCPrepareChunkedGradient(Weights, WCount, NTotal, NOut, Buf) - foreach chunk-of-dataset do - HPCChunkedGradient(...) - HPCFinalizeChunkedGradient(Buf, Grad) +The algorithm pre-multiplies the matrix by a sequence of rotation +transformations which is given by arrays C and S. Depending on the value +of the IsForward parameter either 1 and 2, 3 and 4 and so on (if IsForward=true) +rows are rotated, or the rows N and N-1, N-2 and N-3 and so on, are rotated. + +Not the whole matrix but only a part of it is transformed (rows from M1 to +M2, columns from N1 to N2). Only the elements of this submatrix are changed. + +Input parameters: + IsForward - the sequence of the rotation application. + M1,M2 - the range of rows to be transformed. + N1, N2 - the range of columns to be transformed. + C,S - transformation coefficients. + Array whose index ranges within [1..M2-M1]. + A - processed matrix. + WORK - working array whose index ranges within [N1..N2]. + +Output parameters: + A - transformed matrix. +Utility subroutine. *************************************************************************/ -void hpcpreparechunkedgradient(/* Real */ ae_vector* weights, - ae_int_t wcount, - ae_int_t ntotal, - ae_int_t nin, - ae_int_t nout, - mlpbuffers* buf, +void applyrotationsfromtheleft(ae_bool isforward, + ae_int_t m1, + ae_int_t m2, + ae_int_t n1, + ae_int_t n2, + /* Real */ ae_vector* c, + /* Real */ ae_vector* s, + /* Real */ ae_matrix* a, + /* Real */ ae_vector* work, ae_state *_state) { - ae_int_t i; - ae_int_t batch4size; - ae_int_t chunksize; + ae_int_t j; + ae_int_t jp1; + double ctemp; + double stemp; + double temp; - chunksize = 4; - batch4size = 3*chunksize*ntotal+chunksize*(2*nout+1); - if( buf->xy.rowsxy.colsm2||n1>n2 ) { - ae_matrix_set_length(&buf->xy, chunksize, nin+nout, _state); + return; } - if( buf->xy2.rowsxy2.colsxy2, chunksize, nin+nout, _state); - } - if( buf->xyrow.cntxyrow, nin+nout, _state); - } - if( buf->x.cntx, nin, _state); - } - if( buf->y.cnty, nout, _state); - } - if( buf->desiredy.cntdesiredy, nout, _state); - } - if( buf->batch4buf.cntbatch4buf, batch4size, _state); - } - if( buf->hpcbuf.cnthpcbuf, wcount, _state); - } - if( buf->g.cntg, wcount, _state); + if( n1!=n2 ) + { + + /* + * Common case: N1<>N2 + */ + for(j=m1; j<=m2-1; j++) + { + ctemp = c->ptr.p_double[j-m1+1]; + stemp = s->ptr.p_double[j-m1+1]; + if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) + { + jp1 = j+1; + ae_v_moved(&work->ptr.p_double[n1], 1, &a->ptr.pp_double[jp1][n1], 1, ae_v_len(n1,n2), ctemp); + ae_v_subd(&work->ptr.p_double[n1], 1, &a->ptr.pp_double[j][n1], 1, ae_v_len(n1,n2), stemp); + ae_v_muld(&a->ptr.pp_double[j][n1], 1, ae_v_len(n1,n2), ctemp); + ae_v_addd(&a->ptr.pp_double[j][n1], 1, &a->ptr.pp_double[jp1][n1], 1, ae_v_len(n1,n2), stemp); + ae_v_move(&a->ptr.pp_double[jp1][n1], 1, &work->ptr.p_double[n1], 1, ae_v_len(n1,n2)); + } + } + } + else + { + + /* + * Special case: N1=N2 + */ + for(j=m1; j<=m2-1; j++) + { + ctemp = c->ptr.p_double[j-m1+1]; + stemp = s->ptr.p_double[j-m1+1]; + if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) + { + temp = 
a->ptr.pp_double[j+1][n1]; + a->ptr.pp_double[j+1][n1] = ctemp*temp-stemp*a->ptr.pp_double[j][n1]; + a->ptr.pp_double[j][n1] = stemp*temp+ctemp*a->ptr.pp_double[j][n1]; + } + } + } } - if( !hpccores_hpcpreparechunkedgradientx(weights, wcount, &buf->hpcbuf, _state) ) + else { - for(i=0; i<=wcount-1; i++) + if( n1!=n2 ) { - buf->hpcbuf.ptr.p_double[i] = 0.0; + + /* + * Common case: N1<>N2 + */ + for(j=m2-1; j>=m1; j--) + { + ctemp = c->ptr.p_double[j-m1+1]; + stemp = s->ptr.p_double[j-m1+1]; + if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) + { + jp1 = j+1; + ae_v_moved(&work->ptr.p_double[n1], 1, &a->ptr.pp_double[jp1][n1], 1, ae_v_len(n1,n2), ctemp); + ae_v_subd(&work->ptr.p_double[n1], 1, &a->ptr.pp_double[j][n1], 1, ae_v_len(n1,n2), stemp); + ae_v_muld(&a->ptr.pp_double[j][n1], 1, ae_v_len(n1,n2), ctemp); + ae_v_addd(&a->ptr.pp_double[j][n1], 1, &a->ptr.pp_double[jp1][n1], 1, ae_v_len(n1,n2), stemp); + ae_v_move(&a->ptr.pp_double[jp1][n1], 1, &work->ptr.p_double[n1], 1, ae_v_len(n1,n2)); + } + } + } + else + { + + /* + * Special case: N1=N2 + */ + for(j=m2-1; j>=m1; j--) + { + ctemp = c->ptr.p_double[j-m1+1]; + stemp = s->ptr.p_double[j-m1+1]; + if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) + { + temp = a->ptr.pp_double[j+1][n1]; + a->ptr.pp_double[j+1][n1] = ctemp*temp-stemp*a->ptr.pp_double[j][n1]; + a->ptr.pp_double[j][n1] = stemp*temp+ctemp*a->ptr.pp_double[j][n1]; + } + } } } - buf->wcount = wcount; - buf->ntotal = ntotal; - buf->nin = nin; - buf->nout = nout; - buf->chunksize = chunksize; } /************************************************************************* -Finalizes HPC compuations of chunked gradient with HPCChunkedGradient(). -You have to call this function after calling HPCChunkedGradient() for -a new set of weights. You have to call it only once, see example below: +Application of a sequence of elementary rotations to a matrix -HOW TO PROCESS DATASET WITH THIS FUNCTION: - Grad:=0 - HPCPrepareChunkedGradient(Weights, WCount, NTotal, NOut, Buf) - foreach chunk-of-dataset do - HPCChunkedGradient(...) - HPCFinalizeChunkedGradient(Buf, Grad) +The algorithm post-multiplies the matrix by a sequence of rotation +transformations which is given by arrays C and S. Depending on the value +of the IsForward parameter either 1 and 2, 3 and 4 and so on (if IsForward=true) +rows are rotated, or the rows N and N-1, N-2 and N-3 and so on are rotated. + +Not the whole matrix but only a part of it is transformed (rows from M1 +to M2, columns from N1 to N2). Only the elements of this submatrix are changed. + +Input parameters: + IsForward - the sequence of the rotation application. + M1,M2 - the range of rows to be transformed. + N1, N2 - the range of columns to be transformed. + C,S - transformation coefficients. + Array whose index ranges within [1..N2-N1]. + A - processed matrix. + WORK - working array whose index ranges within [M1..M2]. +Output parameters: + A - transformed matrix. + +Utility subroutine. 
*************************************************************************/ -void hpcfinalizechunkedgradient(mlpbuffers* buf, - /* Real */ ae_vector* grad, +void applyrotationsfromtheright(ae_bool isforward, + ae_int_t m1, + ae_int_t m2, + ae_int_t n1, + ae_int_t n2, + /* Real */ ae_vector* c, + /* Real */ ae_vector* s, + /* Real */ ae_matrix* a, + /* Real */ ae_vector* work, ae_state *_state) { - ae_int_t i; + ae_int_t j; + ae_int_t jp1; + double ctemp; + double stemp; + double temp; - if( !hpccores_hpcfinalizechunkedgradientx(&buf->hpcbuf, buf->wcount, grad, _state) ) + + /* + * Form A * P' + */ + if( isforward ) { - for(i=0; i<=buf->wcount-1; i++) + if( m1!=m2 ) { - grad->ptr.p_double[i] = grad->ptr.p_double[i]+buf->hpcbuf.ptr.p_double[i]; + + /* + * Common case: M1<>M2 + */ + for(j=n1; j<=n2-1; j++) + { + ctemp = c->ptr.p_double[j-n1+1]; + stemp = s->ptr.p_double[j-n1+1]; + if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) + { + jp1 = j+1; + ae_v_moved(&work->ptr.p_double[m1], 1, &a->ptr.pp_double[m1][jp1], a->stride, ae_v_len(m1,m2), ctemp); + ae_v_subd(&work->ptr.p_double[m1], 1, &a->ptr.pp_double[m1][j], a->stride, ae_v_len(m1,m2), stemp); + ae_v_muld(&a->ptr.pp_double[m1][j], a->stride, ae_v_len(m1,m2), ctemp); + ae_v_addd(&a->ptr.pp_double[m1][j], a->stride, &a->ptr.pp_double[m1][jp1], a->stride, ae_v_len(m1,m2), stemp); + ae_v_move(&a->ptr.pp_double[m1][jp1], a->stride, &work->ptr.p_double[m1], 1, ae_v_len(m1,m2)); + } + } + } + else + { + + /* + * Special case: M1=M2 + */ + for(j=n1; j<=n2-1; j++) + { + ctemp = c->ptr.p_double[j-n1+1]; + stemp = s->ptr.p_double[j-n1+1]; + if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) + { + temp = a->ptr.pp_double[m1][j+1]; + a->ptr.pp_double[m1][j+1] = ctemp*temp-stemp*a->ptr.pp_double[m1][j]; + a->ptr.pp_double[m1][j] = stemp*temp+ctemp*a->ptr.pp_double[m1][j]; + } + } + } + } + else + { + if( m1!=m2 ) + { + + /* + * Common case: M1<>M2 + */ + for(j=n2-1; j>=n1; j--) + { + ctemp = c->ptr.p_double[j-n1+1]; + stemp = s->ptr.p_double[j-n1+1]; + if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) + { + jp1 = j+1; + ae_v_moved(&work->ptr.p_double[m1], 1, &a->ptr.pp_double[m1][jp1], a->stride, ae_v_len(m1,m2), ctemp); + ae_v_subd(&work->ptr.p_double[m1], 1, &a->ptr.pp_double[m1][j], a->stride, ae_v_len(m1,m2), stemp); + ae_v_muld(&a->ptr.pp_double[m1][j], a->stride, ae_v_len(m1,m2), ctemp); + ae_v_addd(&a->ptr.pp_double[m1][j], a->stride, &a->ptr.pp_double[m1][jp1], a->stride, ae_v_len(m1,m2), stemp); + ae_v_move(&a->ptr.pp_double[m1][jp1], a->stride, &work->ptr.p_double[m1], 1, ae_v_len(m1,m2)); + } + } + } + else + { + + /* + * Special case: M1=M2 + */ + for(j=n2-1; j>=n1; j--) + { + ctemp = c->ptr.p_double[j-n1+1]; + stemp = s->ptr.p_double[j-n1+1]; + if( ae_fp_neq(ctemp,(double)(1))||ae_fp_neq(stemp,(double)(0)) ) + { + temp = a->ptr.pp_double[m1][j+1]; + a->ptr.pp_double[m1][j+1] = ctemp*temp-stemp*a->ptr.pp_double[m1][j]; + a->ptr.pp_double[m1][j] = stemp*temp+ctemp*a->ptr.pp_double[m1][j]; + } + } } } } /************************************************************************* -Fast kernel for chunked gradient. 
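The inner update of the rotation routines above is the 2x2 transform [CS SN; -SN CS] applied to a pair of rows (or columns): row j becomes cs*row_j + sn*row_{j+1}, and row j+1 becomes cs*row_{j+1} - sn*(old row j), with cs^2 + sn^2 = 1. Below is a small stand-alone sketch in plain C with one cs/sn pair chosen by hand to zero the leading entry of the second row; the helper name and the 2x3 data are invented for the example, and the forward/backward sequencing and work array of the library routines are omitted.

#include <math.h>
#include <stdio.h>

#define NCOLS 3

/* Apply the 2x2 rotation [cs sn; -sn cs] to rows r0 and r1 of a matrix strip. */
static void rotate_rows(double r0[NCOLS], double r1[NCOLS], double cs, double sn)
{
    int j;
    for(j = 0; j < NCOLS; j++)
    {
        double t0 = r0[j], t1 = r1[j];
        r0[j] = cs * t0 + sn * t1;
        r1[j] = cs * t1 - sn * t0;
    }
}

int main(void)
{
    /* cs,sn chosen from f=3, g=4, r=5 so that the rotation maps (3,4) to (5,0) */
    double row0[NCOLS] = { 3.0, 1.0, 0.0 };
    double row1[NCOLS] = { 4.0, 0.0, 2.0 };
    double r  = hypot(3.0, 4.0);
    double cs = 3.0 / r, sn = 4.0 / r;
    rotate_rows(row0, row1, cs, sn);
    printf("row0: %g %g %g\n", row0[0], row0[1], row0[2]);
    printf("row1: %g %g %g\n", row1[0], row1[1], row1[2]);   /* row1[0] is now 0 */
    return 0;
}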
+The subroutine generates the elementary rotation, so that: -*************************************************************************/ -ae_bool hpcchunkedgradient(/* Real */ ae_vector* weights, - /* Integer */ ae_vector* structinfo, - /* Real */ ae_vector* columnmeans, - /* Real */ ae_vector* columnsigmas, - /* Real */ ae_matrix* xy, - ae_int_t cstart, - ae_int_t csize, - /* Real */ ae_vector* batch4buf, - /* Real */ ae_vector* hpcbuf, - double* e, - ae_bool naturalerrorfunc, +[ CS SN ] . [ F ] = [ R ] +[ -SN CS ] [ G ] [ 0 ] + +CS**2 + SN**2 = 1 +*************************************************************************/ +void generaterotation(double f, + double g, + double* cs, + double* sn, + double* r, ae_state *_state) { -#ifndef ALGLIB_INTERCEPTS_SSE2 - ae_bool result; + double f1; + double g1; + *cs = 0; + *sn = 0; + *r = 0; - result = ae_false; - return result; -#else - return _ialglib_i_hpcchunkedgradient(weights, structinfo, columnmeans, columnsigmas, xy, cstart, csize, batch4buf, hpcbuf, e, naturalerrorfunc); -#endif + if( ae_fp_eq(g,(double)(0)) ) + { + *cs = (double)(1); + *sn = (double)(0); + *r = f; + } + else + { + if( ae_fp_eq(f,(double)(0)) ) + { + *cs = (double)(0); + *sn = (double)(1); + *r = g; + } + else + { + f1 = f; + g1 = g; + if( ae_fp_greater(ae_fabs(f1, _state),ae_fabs(g1, _state)) ) + { + *r = ae_fabs(f1, _state)*ae_sqrt(1+ae_sqr(g1/f1, _state), _state); + } + else + { + *r = ae_fabs(g1, _state)*ae_sqrt(1+ae_sqr(f1/g1, _state), _state); + } + *cs = f1/(*r); + *sn = g1/(*r); + if( ae_fp_greater(ae_fabs(f, _state),ae_fabs(g, _state))&&ae_fp_less(*cs,(double)(0)) ) + { + *cs = -*cs; + *sn = -*sn; + *r = -*r; + } + } + } } -/************************************************************************* -Fast kernel for chunked processing. - -*************************************************************************/ -ae_bool hpcchunkedprocess(/* Real */ ae_vector* weights, - /* Integer */ ae_vector* structinfo, - /* Real */ ae_vector* columnmeans, - /* Real */ ae_vector* columnsigmas, - /* Real */ ae_matrix* xy, - ae_int_t cstart, - ae_int_t csize, - /* Real */ ae_vector* batch4buf, - /* Real */ ae_vector* hpcbuf, - ae_state *_state) -{ -#ifndef ALGLIB_INTERCEPTS_SSE2 - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_hpcchunkedprocess(weights, structinfo, columnmeans, columnsigmas, xy, cstart, csize, batch4buf, hpcbuf); #endif -} +#if defined(AE_COMPILE_TRLINSOLVE) || !defined(AE_PARTIAL_BUILD) /************************************************************************* -Stub function. +Utility subroutine performing the "safe" solution of system of linear +equations with triangular coefficient matrices. - -- ALGLIB routine -- - 14.06.2013 - Bochkanov Sergey -*************************************************************************/ -static ae_bool hpccores_hpcpreparechunkedgradientx(/* Real */ ae_vector* weights, - ae_int_t wcount, - /* Real */ ae_vector* hpcbuf, - ae_state *_state) -{ -#ifndef ALGLIB_INTERCEPTS_SSE2 - ae_bool result; +The subroutine uses scaling and solves the scaled system A*x=s*b (where s +is a scalar value) instead of A*x=b, choosing s so that x can be +represented by a floating-point number. The closer the system gets to a +singular, the less s is. If the system is singular, s=0 and x contains the +non-trivial solution of equation A*x=0. +The feature of an algorithm is that it could not cause an overflow or a +division by zero regardless of the matrix used as the input. 
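Stripped of that protection, the underlying operation is ordinary forward/back substitution; the scaled variant described here additionally picks the factor s and solves A*x=s*b so that x always stays representable. For contrast, here is a plain unscaled back substitution for an upper triangular system in standalone C, with no overflow guard, which is precisely the gap the scaled routine closes; the matrix, the right-hand side and the helper name are made up for the example.

#include <stdio.h>

#define N 3

/* Solve the upper triangular system a*x = b by back substitution.
   No scaling is applied, so a nearly singular diagonal can overflow x. */
static void solve_upper(const double a[N][N], const double b[N], double x[N])
{
    int i, j;
    for(i = N - 1; i >= 0; i--)
    {
        double v = b[i];
        for(j = i + 1; j < N; j++)
            v -= a[i][j] * x[j];
        x[i] = v / a[i][i];
    }
}

int main(void)
{
    double a[N][N] = { { 2.0, 1.0, 1.0 }, { 0.0, 3.0, 1.0 }, { 0.0, 0.0, 4.0 } };
    double b[N]    = { 7.0, 9.0, 12.0 };
    double x[N];
    solve_upper(a, b, x);
    printf("x = %g %g %g\n", x[0], x[1], x[2]);   /* expected: 1 2 3 */
    return 0;
}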
- result = ae_false; - return result; -#else - return _ialglib_i_hpcpreparechunkedgradientx(weights, wcount, hpcbuf); -#endif -} +The algorithm can solve systems of equations with upper/lower triangular +matrices, with/without unit diagonal, and systems of type A*x=b or A'*x=b +(where A' is a transposed matrix A). +Input parameters: + A - system matrix. Array whose indexes range within [0..N-1, 0..N-1]. + N - size of matrix A. + X - right-hand member of a system. + Array whose index ranges within [0..N-1]. + IsUpper - matrix type. If it is True, the system matrix is the upper + triangular and is located in the corresponding part of + matrix A. + Trans - problem type. If it is True, the problem to be solved is + A'*x=b, otherwise it is A*x=b. + Isunit - matrix type. If it is True, the system matrix has a unit + diagonal (the elements on the main diagonal are not used + in the calculation process), otherwise the matrix is considered + to be a general triangular matrix. -/************************************************************************* -Stub function. +Output parameters: + X - solution. Array whose index ranges within [0..N-1]. + S - scaling factor. - -- ALGLIB routine -- - 14.06.2013 - Bochkanov Sergey + -- LAPACK auxiliary routine (version 3.0) -- + Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd., + Courant Institute, Argonne National Lab, and Rice University + June 30, 1992 *************************************************************************/ -static ae_bool hpccores_hpcfinalizechunkedgradientx(/* Real */ ae_vector* buf, - ae_int_t wcount, - /* Real */ ae_vector* grad, +void rmatrixtrsafesolve(/* Real */ ae_matrix* a, + ae_int_t n, + /* Real */ ae_vector* x, + double* s, + ae_bool isupper, + ae_bool istrans, + ae_bool isunit, ae_state *_state) { -#ifndef ALGLIB_INTERCEPTS_SSE2 - ae_bool result; - - - result = ae_false; - return result; -#else - return _ialglib_i_hpcfinalizechunkedgradientx(buf, wcount, grad); -#endif -} + ae_frame _frame_block; + ae_bool normin; + ae_vector cnorm; + ae_matrix a1; + ae_vector x1; + ae_int_t i; + ae_frame_make(_state, &_frame_block); + memset(&cnorm, 0, sizeof(cnorm)); + memset(&a1, 0, sizeof(a1)); + memset(&x1, 0, sizeof(x1)); + *s = 0; + ae_vector_init(&cnorm, 0, DT_REAL, _state, ae_true); + ae_matrix_init(&a1, 0, 0, DT_REAL, _state, ae_true); + ae_vector_init(&x1, 0, DT_REAL, _state, ae_true); -void _mlpbuffers_init(void* _p, ae_state *_state) -{ - mlpbuffers *p = (mlpbuffers*)_p; - ae_touch_ptr((void*)p); - ae_vector_init(&p->batch4buf, 0, DT_REAL, _state); - ae_vector_init(&p->hpcbuf, 0, DT_REAL, _state); - ae_matrix_init(&p->xy, 0, 0, DT_REAL, _state); - ae_matrix_init(&p->xy2, 0, 0, DT_REAL, _state); - ae_vector_init(&p->xyrow, 0, DT_REAL, _state); - ae_vector_init(&p->x, 0, DT_REAL, _state); - ae_vector_init(&p->y, 0, DT_REAL, _state); - ae_vector_init(&p->desiredy, 0, DT_REAL, _state); - ae_vector_init(&p->g, 0, DT_REAL, _state); - ae_vector_init(&p->tmp0, 0, DT_REAL, _state); + + /* + * From 0-based to 1-based + */ + normin = ae_false; + ae_matrix_set_length(&a1, n+1, n+1, _state); + ae_vector_set_length(&x1, n+1, _state); + for(i=1; i<=n; i++) + { + ae_v_move(&a1.ptr.pp_double[i][1], 1, &a->ptr.pp_double[i-1][0], 1, ae_v_len(1,n)); + } + ae_v_move(&x1.ptr.p_double[1], 1, &x->ptr.p_double[0], 1, ae_v_len(1,n)); + + /* + * Solve 1-based + */ + safesolvetriangular(&a1, n, &x1, s, isupper, istrans, isunit, normin, &cnorm, _state); + + /* + * From 1-based to 0-based + */ + ae_v_move(&x->ptr.p_double[0], 1, &x1.ptr.p_double[1], 
1, ae_v_len(0,n-1)); + ae_frame_leave(_state); } -void _mlpbuffers_init_copy(void* _dst, void* _src, ae_state *_state) +/************************************************************************* +Obsolete 1-based subroutine. +See RMatrixTRSafeSolve for 0-based replacement. +*************************************************************************/ +void safesolvetriangular(/* Real */ ae_matrix* a, + ae_int_t n, + /* Real */ ae_vector* x, + double* s, + ae_bool isupper, + ae_bool istrans, + ae_bool isunit, + ae_bool normin, + /* Real */ ae_vector* cnorm, + ae_state *_state) { - mlpbuffers *dst = (mlpbuffers*)_dst; - mlpbuffers *src = (mlpbuffers*)_src; - dst->chunksize = src->chunksize; - dst->ntotal = src->ntotal; - dst->nin = src->nin; - dst->nout = src->nout; - dst->wcount = src->wcount; - ae_vector_init_copy(&dst->batch4buf, &src->batch4buf, _state); - ae_vector_init_copy(&dst->hpcbuf, &src->hpcbuf, _state); - ae_matrix_init_copy(&dst->xy, &src->xy, _state); - ae_matrix_init_copy(&dst->xy2, &src->xy2, _state); - ae_vector_init_copy(&dst->xyrow, &src->xyrow, _state); - ae_vector_init_copy(&dst->x, &src->x, _state); - ae_vector_init_copy(&dst->y, &src->y, _state); - ae_vector_init_copy(&dst->desiredy, &src->desiredy, _state); - dst->e = src->e; - ae_vector_init_copy(&dst->g, &src->g, _state); - ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state); -} + ae_int_t i; + ae_int_t imax; + ae_int_t j; + ae_int_t jfirst; + ae_int_t jinc; + ae_int_t jlast; + ae_int_t jm1; + ae_int_t jp1; + ae_int_t ip1; + ae_int_t im1; + ae_int_t k; + ae_int_t flg; + double v; + double vd; + double bignum; + double grow; + double rec; + double smlnum; + double sumj; + double tjj; + double tjjs; + double tmax; + double tscal; + double uscal; + double xbnd; + double xj; + double xmax; + ae_bool notran; + ae_bool upper; + ae_bool nounit; + *s = 0; -void _mlpbuffers_clear(void* _p) -{ - mlpbuffers *p = (mlpbuffers*)_p; - ae_touch_ptr((void*)p); - ae_vector_clear(&p->batch4buf); - ae_vector_clear(&p->hpcbuf); - ae_matrix_clear(&p->xy); - ae_matrix_clear(&p->xy2); - ae_vector_clear(&p->xyrow); - ae_vector_clear(&p->x); - ae_vector_clear(&p->y); - ae_vector_clear(&p->desiredy); - ae_vector_clear(&p->g); - ae_vector_clear(&p->tmp0); -} - - -void _mlpbuffers_destroy(void* _p) -{ - mlpbuffers *p = (mlpbuffers*)_p; - ae_touch_ptr((void*)p); - ae_vector_destroy(&p->batch4buf); - ae_vector_destroy(&p->hpcbuf); - ae_matrix_destroy(&p->xy); - ae_matrix_destroy(&p->xy2); - ae_vector_destroy(&p->xyrow); - ae_vector_destroy(&p->x); - ae_vector_destroy(&p->y); - ae_vector_destroy(&p->desiredy); - ae_vector_destroy(&p->g); - ae_vector_destroy(&p->tmp0); -} - - - - -/************************************************************************* -More precise dot-product. Absolute error of subroutine result is about -1 ulp of max(MX,V), where: - MX = max( |a[i]*b[i]| ) - V = |(a,b)| - -INPUT PARAMETERS - A - array[0..N-1], vector 1 - B - array[0..N-1], vector 2 - N - vectors length, N<2^29. - Temp - array[0..N-1], pre-allocated temporary storage - -OUTPUT PARAMETERS - R - (A,B) - RErr - estimate of error. This estimate accounts for both errors - during calculation of (A,B) and errors introduced by - rounding of A and B to fit in double (about 1 ulp). 
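The roughly-1-ulp guarantee quoted here comes from the scaled, chunked summation performed by XSum further down; a simpler way to illustrate the same goal, a dot product whose rounding error does not grow with N, is compensated summation. The sketch below uses Neumaier's variant of Kahan summation on plain arrays; it is offered only as a self-contained illustration of the idea and is not the chunking algorithm used by XDot/XSum.

#include <math.h>
#include <stdio.h>

/* Neumaier-compensated dot product: a running correction term picks up
   whatever each addition rounds away, so the error stays at a few ulp
   instead of growing with the number of terms. */
static double dot_compensated(const double *a, const double *b, int n)
{
    double sum = 0.0, corr = 0.0;
    int i;
    for(i = 0; i < n; i++)
    {
        double x = a[i] * b[i];
        double t = sum + x;
        if( fabs(sum) >= fabs(x) )
            corr += (sum - t) + x;    /* low-order part of x lost in the add */
        else
            corr += (x - t) + sum;    /* low-order part of sum lost in the add */
        sum = t;
    }
    return sum + corr;
}

int main(void)
{
    double a[4] = { 1.0e16, 1.0, -1.0e16, 1.0 };
    double b[4] = { 1.0,    1.0,  1.0,    1.0 };
    /* with default round-to-nearest a plain running sum gives 1; the true value is 2 */
    printf("%.17g\n", dot_compensated(a, b, 4));
    return 0;
}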
- - -- ALGLIB -- - Copyright 24.08.2009 by Bochkanov Sergey -*************************************************************************/ -void xdot(/* Real */ ae_vector* a, - /* Real */ ae_vector* b, - ae_int_t n, - /* Real */ ae_vector* temp, - double* r, - double* rerr, - ae_state *_state) -{ - ae_int_t i; - double mx; - double v; - - *r = 0; - *rerr = 0; - + upper = isupper; + notran = !istrans; + nounit = !isunit; /* - * special cases: - * * N=0 + * these initializers are not really necessary, + * but without them compiler complains about uninitialized locals */ - if( n==0 ) - { - *r = (double)(0); - *rerr = (double)(0); - return; - } - mx = (double)(0); - for(i=0; i<=n-1; i++) - { - v = a->ptr.p_double[i]*b->ptr.p_double[i]; - temp->ptr.p_double[i] = v; - mx = ae_maxreal(mx, ae_fabs(v, _state), _state); - } - if( ae_fp_eq(mx,(double)(0)) ) - { - *r = (double)(0); - *rerr = (double)(0); - return; - } - xblas_xsum(temp, mx, n, r, rerr, _state); -} - - -/************************************************************************* -More precise complex dot-product. Absolute error of subroutine result is -about 1 ulp of max(MX,V), where: - MX = max( |a[i]*b[i]| ) - V = |(a,b)| - -INPUT PARAMETERS - A - array[0..N-1], vector 1 - B - array[0..N-1], vector 2 - N - vectors length, N<2^29. - Temp - array[0..2*N-1], pre-allocated temporary storage - -OUTPUT PARAMETERS - R - (A,B) - RErr - estimate of error. This estimate accounts for both errors - during calculation of (A,B) and errors introduced by - rounding of A and B to fit in double (about 1 ulp). - - -- ALGLIB -- - Copyright 27.01.2010 by Bochkanov Sergey -*************************************************************************/ -void xcdot(/* Complex */ ae_vector* a, - /* Complex */ ae_vector* b, - ae_int_t n, - /* Real */ ae_vector* temp, - ae_complex* r, - double* rerr, - ae_state *_state) -{ - ae_int_t i; - double mx; - double v; - double rerrx; - double rerry; - - r->x = 0; - r->y = 0; - *rerr = 0; - + tjjs = (double)(0); /* - * special cases: - * * N=0 + * Quick return if possible */ if( n==0 ) { - *r = ae_complex_from_i(0); - *rerr = (double)(0); return; } /* - * calculate real part + * Determine machine dependent parameters to control overflow. */ - mx = (double)(0); - for(i=0; i<=n-1; i++) - { - v = a->ptr.p_complex[i].x*b->ptr.p_complex[i].x; - temp->ptr.p_double[2*i+0] = v; - mx = ae_maxreal(mx, ae_fabs(v, _state), _state); - v = -a->ptr.p_complex[i].y*b->ptr.p_complex[i].y; - temp->ptr.p_double[2*i+1] = v; - mx = ae_maxreal(mx, ae_fabs(v, _state), _state); - } - if( ae_fp_eq(mx,(double)(0)) ) - { - r->x = (double)(0); - rerrx = (double)(0); - } - else + smlnum = ae_minrealnumber/(ae_machineepsilon*2); + bignum = 1/smlnum; + *s = (double)(1); + if( !normin ) { - xblas_xsum(temp, mx, 2*n, &r->x, &rerrx, _state); + ae_vector_set_length(cnorm, n+1, _state); + + /* + * Compute the 1-norm of each column, not including the diagonal. + */ + if( upper ) + { + + /* + * A is upper triangular. + */ + for(j=1; j<=n; j++) + { + v = (double)(0); + for(k=1; k<=j-1; k++) + { + v = v+ae_fabs(a->ptr.pp_double[k][j], _state); + } + cnorm->ptr.p_double[j] = v; + } + } + else + { + + /* + * A is lower triangular. + */ + for(j=1; j<=n-1; j++) + { + v = (double)(0); + for(k=j+1; k<=n; k++) + { + v = v+ae_fabs(a->ptr.pp_double[k][j], _state); + } + cnorm->ptr.p_double[j] = v; + } + cnorm->ptr.p_double[n] = (double)(0); + } } /* - * calculate imaginary part + * Scale the column norms by TSCAL if the maximum element in CNORM is + * greater than BIGNUM. 
*/ - mx = (double)(0); - for(i=0; i<=n-1; i++) + imax = 1; + for(k=2; k<=n; k++) { - v = a->ptr.p_complex[i].x*b->ptr.p_complex[i].y; - temp->ptr.p_double[2*i+0] = v; - mx = ae_maxreal(mx, ae_fabs(v, _state), _state); - v = a->ptr.p_complex[i].y*b->ptr.p_complex[i].x; - temp->ptr.p_double[2*i+1] = v; - mx = ae_maxreal(mx, ae_fabs(v, _state), _state); + if( ae_fp_greater(cnorm->ptr.p_double[k],cnorm->ptr.p_double[imax]) ) + { + imax = k; + } } - if( ae_fp_eq(mx,(double)(0)) ) + tmax = cnorm->ptr.p_double[imax]; + if( ae_fp_less_eq(tmax,bignum) ) { - r->y = (double)(0); - rerry = (double)(0); + tscal = (double)(1); } else { - xblas_xsum(temp, mx, 2*n, &r->y, &rerry, _state); + tscal = 1/(smlnum*tmax); + ae_v_muld(&cnorm->ptr.p_double[1], 1, ae_v_len(1,n), tscal); } /* - * total error + * Compute a bound on the computed solution vector to see if the + * Level 2 BLAS routine DTRSV can be used. */ - if( ae_fp_eq(rerrx,(double)(0))&&ae_fp_eq(rerry,(double)(0)) ) + j = 1; + for(k=2; k<=n; k++) { - *rerr = (double)(0); + if( ae_fp_greater(ae_fabs(x->ptr.p_double[k], _state),ae_fabs(x->ptr.p_double[j], _state)) ) + { + j = k; + } } - else + xmax = ae_fabs(x->ptr.p_double[j], _state); + xbnd = xmax; + if( notran ) { - *rerr = ae_maxreal(rerrx, rerry, _state)*ae_sqrt(1+ae_sqr(ae_minreal(rerrx, rerry, _state)/ae_maxreal(rerrx, rerry, _state), _state), _state); - } -} - - -/************************************************************************* -Internal subroutine for extra-precise calculation of SUM(w[i]). - -INPUT PARAMETERS: - W - array[0..N-1], values to be added - W is modified during calculations. - MX - max(W[i]) - N - array size - -OUTPUT PARAMETERS: - R - SUM(w[i]) - RErr- error estimate for R - - -- ALGLIB -- - Copyright 24.08.2009 by Bochkanov Sergey -*************************************************************************/ -static void xblas_xsum(/* Real */ ae_vector* w, - double mx, - ae_int_t n, - double* r, - double* rerr, - ae_state *_state) -{ - ae_int_t i; - ae_int_t k; - ae_int_t ks; - double v; - double s; - double ln2; - double chunk; - double invchunk; - ae_bool allzeros; - - *r = 0; - *rerr = 0; - - - /* - * special cases: - * * N=0 - * * N is too large to use integer arithmetics - */ - if( n==0 ) - { - *r = (double)(0); - *rerr = (double)(0); - return; - } - if( ae_fp_eq(mx,(double)(0)) ) - { - *r = (double)(0); - *rerr = (double)(0); - return; - } - ae_assert(n<536870912, "XDot: N is too large!", _state); - - /* - * Prepare - */ - ln2 = ae_log((double)(2), _state); - *rerr = mx*ae_machineepsilon; - - /* - * 1. find S such that 0.5<=S*MX<1 - * 2. multiply W by S, so task is normalized in some sense - * 3. S:=1/S so we can obtain original vector multiplying by S - */ - k = ae_round(ae_log(mx, _state)/ln2, _state); - s = xblas_xfastpow((double)(2), -k, _state); - while(ae_fp_greater_eq(s*mx,(double)(1))) - { - s = 0.5*s; - } - while(ae_fp_less(s*mx,0.5)) - { - s = 2*s; - } - ae_v_muld(&w->ptr.p_double[0], 1, ae_v_len(0,n-1), s); - s = 1/s; - - /* - * find Chunk=2^M such that N*Chunk<2^29 - * - * we have chosen upper limit (2^29) with enough space left - * to tolerate possible problems with rounding and N's close - * to the limit, so we don't want to be very strict here. 
- */ - k = ae_trunc(ae_log((double)536870912/(double)n, _state)/ln2, _state); - chunk = xblas_xfastpow((double)(2), k, _state); - if( ae_fp_less(chunk,(double)(2)) ) - { - chunk = (double)(2); - } - invchunk = 1/chunk; - - /* - * calculate result - */ - *r = (double)(0); - ae_v_muld(&w->ptr.p_double[0], 1, ae_v_len(0,n-1), chunk); - for(;;) - { - s = s*invchunk; - allzeros = ae_true; - ks = 0; - for(i=0; i<=n-1; i++) + + /* + * Compute the growth in A * x = b. + */ + if( upper ) { - v = w->ptr.p_double[i]; - k = ae_trunc(v, _state); - if( ae_fp_neq(v,(double)(k)) ) - { - allzeros = ae_false; - } - w->ptr.p_double[i] = chunk*(v-k); - ks = ks+k; + jfirst = n; + jlast = 1; + jinc = -1; } - *r = *r+s*ks; - v = ae_fabs(*r, _state); - if( allzeros||ae_fp_eq(s*n+mx,mx) ) + else { - break; + jfirst = 1; + jlast = n; + jinc = 1; } - } - - /* - * correct error - */ - *rerr = ae_maxreal(*rerr, ae_fabs(*r, _state)*ae_machineepsilon, _state); -} - - -/************************************************************************* -Fast Pow - - -- ALGLIB -- - Copyright 24.08.2009 by Bochkanov Sergey -*************************************************************************/ -static double xblas_xfastpow(double r, ae_int_t n, ae_state *_state) -{ - double result; - - - result = (double)(0); - if( n>0 ) - { - if( n%2==0 ) + if( ae_fp_neq(tscal,(double)(1)) ) { - result = ae_sqr(xblas_xfastpow(r, n/2, _state), _state); + grow = (double)(0); } else { - result = r*xblas_xfastpow(r, n-1, _state); + if( nounit ) + { + + /* + * A is non-unit triangular. + * + * Compute GROW = 1/G(j) and XBND = 1/M(j). + * Initially, G(0) = max{x(i), i=1,...,n}. + */ + grow = 1/ae_maxreal(xbnd, smlnum, _state); + xbnd = grow; + j = jfirst; + while((jinc>0&&j<=jlast)||(jinc<0&&j>=jlast)) + { + + /* + * Exit the loop if the growth factor is too small. + */ + if( ae_fp_less_eq(grow,smlnum) ) + { + break; + } + + /* + * M(j) = G(j-1) / abs(A(j,j)) + */ + tjj = ae_fabs(a->ptr.pp_double[j][j], _state); + xbnd = ae_minreal(xbnd, ae_minreal((double)(1), tjj, _state)*grow, _state); + if( ae_fp_greater_eq(tjj+cnorm->ptr.p_double[j],smlnum) ) + { + + /* + * G(j) = G(j-1)*( 1 + CNORM(j) / abs(A(j,j)) ) + */ + grow = grow*(tjj/(tjj+cnorm->ptr.p_double[j])); + } + else + { + + /* + * G(j) could overflow, set GROW to 0. + */ + grow = (double)(0); + } + if( j==jlast ) + { + grow = xbnd; + } + j = j+jinc; + } + } + else + { + + /* + * A is unit triangular. + * + * Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. + */ + grow = ae_minreal((double)(1), 1/ae_maxreal(xbnd, smlnum, _state), _state); + j = jfirst; + while((jinc>0&&j<=jlast)||(jinc<0&&j>=jlast)) + { + + /* + * Exit the loop if the growth factor is too small. + */ + if( ae_fp_less_eq(grow,smlnum) ) + { + break; + } + + /* + * G(j) = G(j-1)*( 1 + CNORM(j) ) + */ + grow = grow*(1/(1+cnorm->ptr.p_double[j])); + j = j+jinc; + } + } } - return result; - } - if( n==0 ) - { - result = (double)(1); } - if( n<0 ) + else { - result = xblas_xfastpow(1/r, -n, _state); - } - return result; -} - - - - -/************************************************************************* -Normalizes direction/step pair: makes |D|=1, scales Stp. -If |D|=0, it returns, leavind D/Stp unchanged. 
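LinMinNormalized rescales in two stages, first by the largest component so that the squaring cannot overflow, then by the 2-norm, dividing Stp by the same factors so that Stp*D is exactly the step it was before. A compact sketch of the same two-stage idea on a plain array follows; the helper name and the sample data are invented, and the ALGLIB vector types are replaced by raw pointers.

#include <math.h>
#include <stdio.h>

/* Make |d| = 1 in the 2-norm and rescale *stp so that (*stp)*d is unchanged.
   Two stages: scale by max|d[i]| first so the squaring cannot overflow,
   then by the 2-norm of the scaled vector. */
static void normalize_direction(double *d, double *stp, int n)
{
    double mx = 0.0, s;
    int i;
    for(i = 0; i < n; i++)
        mx = fmax(mx, fabs(d[i]));
    if( mx == 0.0 )
        return;                    /* zero direction: leave d and stp alone */
    s = 1.0 / mx;
    for(i = 0; i < n; i++)
        d[i] *= s;
    *stp /= s;
    s = 0.0;
    for(i = 0; i < n; i++)
        s += d[i] * d[i];
    s = 1.0 / sqrt(s);
    for(i = 0; i < n; i++)
        d[i] *= s;
    *stp /= s;
}

int main(void)
{
    double d[3] = { 3.0, 0.0, 4.0 };
    double stp  = 2.0;
    normalize_direction(d, &stp, 3);
    /* d is now (0.6, 0, 0.8) and stp = 10, so stp*d is still (6, 0, 8) */
    printf("d = %g %g %g, stp = %g\n", d[0], d[1], d[2], stp);
    return 0;
}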
- - -- ALGLIB -- - Copyright 01.04.2010 by Bochkanov Sergey -*************************************************************************/ -void linminnormalized(/* Real */ ae_vector* d, - double* stp, - ae_int_t n, - ae_state *_state) -{ - double mx; - double s; - ae_int_t i; - - - - /* - * first, scale D to avoid underflow/overflow durng squaring - */ - mx = (double)(0); - for(i=0; i<=n-1; i++) - { - mx = ae_maxreal(mx, ae_fabs(d->ptr.p_double[i], _state), _state); - } - if( ae_fp_eq(mx,(double)(0)) ) - { - return; - } - s = 1/mx; - ae_v_muld(&d->ptr.p_double[0], 1, ae_v_len(0,n-1), s); - *stp = *stp/s; - - /* - * normalize D - */ - s = ae_v_dotproduct(&d->ptr.p_double[0], 1, &d->ptr.p_double[0], 1, ae_v_len(0,n-1)); - s = 1/ae_sqrt(s, _state); - ae_v_muld(&d->ptr.p_double[0], 1, ae_v_len(0,n-1), s); - *stp = *stp/s; -} - - -/************************************************************************* -THE PURPOSE OF MCSRCH IS TO FIND A STEP WHICH SATISFIES A SUFFICIENT -DECREASE CONDITION AND A CURVATURE CONDITION. - -AT EACH STAGE THE SUBROUTINE UPDATES AN INTERVAL OF UNCERTAINTY WITH -ENDPOINTS STX AND STY. THE INTERVAL OF UNCERTAINTY IS INITIALLY CHOSEN -SO THAT IT CONTAINS A MINIMIZER OF THE MODIFIED FUNCTION - - F(X+STP*S) - F(X) - FTOL*STP*(GRADF(X)'S). - -IF A STEP IS OBTAINED FOR WHICH THE MODIFIED FUNCTION HAS A NONPOSITIVE -FUNCTION VALUE AND NONNEGATIVE DERIVATIVE, THEN THE INTERVAL OF -UNCERTAINTY IS CHOSEN SO THAT IT CONTAINS A MINIMIZER OF F(X+STP*S). - -THE ALGORITHM IS DESIGNED TO FIND A STEP WHICH SATISFIES THE SUFFICIENT -DECREASE CONDITION - - F(X+STP*S) .LE. F(X) + FTOL*STP*(GRADF(X)'S), - -AND THE CURVATURE CONDITION - - ABS(GRADF(X+STP*S)'S)) .LE. GTOL*ABS(GRADF(X)'S). - -IF FTOL IS LESS THAN GTOL AND IF, FOR EXAMPLE, THE FUNCTION IS BOUNDED -BELOW, THEN THERE IS ALWAYS A STEP WHICH SATISFIES BOTH CONDITIONS. -IF NO STEP CAN BE FOUND WHICH SATISFIES BOTH CONDITIONS, THEN THE -ALGORITHM USUALLY STOPS WHEN ROUNDING ERRORS PREVENT FURTHER PROGRESS. -IN THIS CASE STP ONLY SATISFIES THE SUFFICIENT DECREASE CONDITION. - - -:::::::::::::IMPORTANT NOTES::::::::::::: - -NOTE 1: - -This routine guarantees that it will stop at the last point where function -value was calculated. It won't make several additional function evaluations -after finding good point. So if you store function evaluations requested by -this routine, you can be sure that last one is the point where we've stopped. - -NOTE 2: - -when 0initial_point - after rounding to machine precision -::::::::::::::::::::::::::::::::::::::::: - - -PARAMETERS DESCRIPRION - -STAGE IS ZERO ON FIRST CALL, ZERO ON FINAL EXIT - -N IS A POSITIVE INTEGER INPUT VARIABLE SET TO THE NUMBER OF VARIABLES. - -X IS AN ARRAY OF LENGTH N. ON INPUT IT MUST CONTAIN THE BASE POINT FOR -THE LINE SEARCH. ON OUTPUT IT CONTAINS X+STP*S. - -F IS A VARIABLE. ON INPUT IT MUST CONTAIN THE VALUE OF F AT X. ON OUTPUT -IT CONTAINS THE VALUE OF F AT X + STP*S. - -G IS AN ARRAY OF LENGTH N. ON INPUT IT MUST CONTAIN THE GRADIENT OF F AT X. -ON OUTPUT IT CONTAINS THE GRADIENT OF F AT X + STP*S. - -S IS AN INPUT ARRAY OF LENGTH N WHICH SPECIFIES THE SEARCH DIRECTION. - -STP IS A NONNEGATIVE VARIABLE. ON INPUT STP CONTAINS AN INITIAL ESTIMATE -OF A SATISFACTORY STEP. ON OUTPUT STP CONTAINS THE FINAL ESTIMATE. - -FTOL AND GTOL ARE NONNEGATIVE INPUT VARIABLES. TERMINATION OCCURS WHEN THE -SUFFICIENT DECREASE CONDITION AND THE DIRECTIONAL DERIVATIVE CONDITION ARE -SATISFIED. - -XTOL IS A NONNEGATIVE INPUT VARIABLE. 
TERMINATION OCCURS WHEN THE RELATIVE -WIDTH OF THE INTERVAL OF UNCERTAINTY IS AT MOST XTOL. - -STPMIN AND STPMAX ARE NONNEGATIVE INPUT VARIABLES WHICH SPECIFY LOWER AND -UPPER BOUNDS FOR THE STEP. - -MAXFEV IS A POSITIVE INTEGER INPUT VARIABLE. TERMINATION OCCURS WHEN THE -NUMBER OF CALLS TO FCN IS AT LEAST MAXFEV BY THE END OF AN ITERATION. - -INFO IS AN INTEGER OUTPUT VARIABLE SET AS FOLLOWS: - INFO = 0 IMPROPER INPUT PARAMETERS. - - INFO = 1 THE SUFFICIENT DECREASE CONDITION AND THE - DIRECTIONAL DERIVATIVE CONDITION HOLD. - - INFO = 2 RELATIVE WIDTH OF THE INTERVAL OF UNCERTAINTY - IS AT MOST XTOL. - - INFO = 3 NUMBER OF CALLS TO FCN HAS REACHED MAXFEV. - - INFO = 4 THE STEP IS AT THE LOWER BOUND STPMIN. - - INFO = 5 THE STEP IS AT THE UPPER BOUND STPMAX. - - INFO = 6 ROUNDING ERRORS PREVENT FURTHER PROGRESS. - THERE MAY NOT BE A STEP WHICH SATISFIES THE - SUFFICIENT DECREASE AND CURVATURE CONDITIONS. - TOLERANCES MAY BE TOO SMALL. - -NFEV IS AN INTEGER OUTPUT VARIABLE SET TO THE NUMBER OF CALLS TO FCN. - -WA IS A WORK ARRAY OF LENGTH N. - -ARGONNE NATIONAL LABORATORY. MINPACK PROJECT. JUNE 1983 -JORGE J. MORE', DAVID J. THUENTE -*************************************************************************/ -void mcsrch(ae_int_t n, - /* Real */ ae_vector* x, - double* f, - /* Real */ ae_vector* g, - /* Real */ ae_vector* s, - double* stp, - double stpmax, - double gtol, - ae_int_t* info, - ae_int_t* nfev, - /* Real */ ae_vector* wa, - linminstate* state, - ae_int_t* stage, - ae_state *_state) -{ - ae_int_t i; - double v; - double p5; - double p66; - double zero; - - - - /* - * init - */ - p5 = 0.5; - p66 = 0.66; - state->xtrapf = 4.0; - zero = (double)(0); - if( ae_fp_eq(stpmax,(double)(0)) ) - { - stpmax = linmin_defstpmax; - } - if( ae_fp_less(*stp,linmin_stpmin) ) - { - *stp = linmin_stpmin; - } - if( ae_fp_greater(*stp,stpmax) ) - { - *stp = stpmax; - } - - /* - * Main cycle - */ - for(;;) - { - if( *stage==0 ) + + /* + * Compute the growth in A' * x = b. + */ + if( upper ) { - - /* - * NEXT - */ - *stage = 2; - continue; + jfirst = 1; + jlast = n; + jinc = 1; } - if( *stage==2 ) + else { - state->infoc = 1; - *info = 0; - - /* - * CHECK THE INPUT PARAMETERS FOR ERRORS. - */ - if( ae_fp_less(stpmax,linmin_stpmin)&&ae_fp_greater(stpmax,(double)(0)) ) - { - *info = 5; - *stp = stpmax; - *stage = 0; - return; - } - if( ((((((n<=0||ae_fp_less_eq(*stp,(double)(0)))||ae_fp_less(linmin_ftol,(double)(0)))||ae_fp_less(gtol,zero))||ae_fp_less(linmin_xtol,zero))||ae_fp_less(linmin_stpmin,zero))||ae_fp_less(stpmax,linmin_stpmin))||linmin_maxfev<=0 ) - { - *stage = 0; - return; - } - - /* - * COMPUTE THE INITIAL GRADIENT IN THE SEARCH DIRECTION - * AND CHECK THAT S IS A DESCENT DIRECTION. - */ - v = ae_v_dotproduct(&g->ptr.p_double[0], 1, &s->ptr.p_double[0], 1, ae_v_len(0,n-1)); - state->dginit = v; - if( ae_fp_greater_eq(state->dginit,(double)(0)) ) + jfirst = n; + jlast = 1; + jinc = -1; + } + if( ae_fp_neq(tscal,(double)(1)) ) + { + grow = (double)(0); + } + else + { + if( nounit ) { - *stage = 0; - return; - } - - /* - * INITIALIZE LOCAL VARIABLES. - */ - state->brackt = ae_false; - state->stage1 = ae_true; - *nfev = 0; - state->finit = *f; - state->dgtest = linmin_ftol*state->dginit; - state->width = stpmax-linmin_stpmin; - state->width1 = state->width/p5; - ae_v_move(&wa->ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); - - /* - * THE VARIABLES STX, FX, DGX CONTAIN THE VALUES OF THE STEP, - * FUNCTION, AND DIRECTIONAL DERIVATIVE AT THE BEST STEP. 
- * THE VARIABLES STY, FY, DGY CONTAIN THE VALUE OF THE STEP, - * FUNCTION, AND DERIVATIVE AT THE OTHER ENDPOINT OF - * THE INTERVAL OF UNCERTAINTY. - * THE VARIABLES STP, F, DG CONTAIN THE VALUES OF THE STEP, - * FUNCTION, AND DERIVATIVE AT THE CURRENT STEP. - */ - state->stx = (double)(0); - state->fx = state->finit; - state->dgx = state->dginit; - state->sty = (double)(0); - state->fy = state->finit; - state->dgy = state->dginit; - - /* - * NEXT - */ - *stage = 3; - continue; + + /* + * A is non-unit triangular. + * + * Compute GROW = 1/G(j) and XBND = 1/M(j). + * Initially, M(0) = max{x(i), i=1,...,n}. + */ + grow = 1/ae_maxreal(xbnd, smlnum, _state); + xbnd = grow; + j = jfirst; + while((jinc>0&&j<=jlast)||(jinc<0&&j>=jlast)) + { + + /* + * Exit the loop if the growth factor is too small. + */ + if( ae_fp_less_eq(grow,smlnum) ) + { + break; + } + + /* + * G(j) = max( G(j-1), M(j-1)*( 1 + CNORM(j) ) ) + */ + xj = 1+cnorm->ptr.p_double[j]; + grow = ae_minreal(grow, xbnd/xj, _state); + + /* + * M(j) = M(j-1)*( 1 + CNORM(j) ) / abs(A(j,j)) + */ + tjj = ae_fabs(a->ptr.pp_double[j][j], _state); + if( ae_fp_greater(xj,tjj) ) + { + xbnd = xbnd*(tjj/xj); + } + if( j==jlast ) + { + grow = ae_minreal(grow, xbnd, _state); + } + j = j+jinc; + } + } + else + { + + /* + * A is unit triangular. + * + * Compute GROW = 1/G(j), where G(0) = max{x(i), i=1,...,n}. + */ + grow = ae_minreal((double)(1), 1/ae_maxreal(xbnd, smlnum, _state), _state); + j = jfirst; + while((jinc>0&&j<=jlast)||(jinc<0&&j>=jlast)) + { + + /* + * Exit the loop if the growth factor is too small. + */ + if( ae_fp_less_eq(grow,smlnum) ) + { + break; + } + + /* + * G(j) = ( 1 + CNORM(j) )*G(j-1) + */ + xj = 1+cnorm->ptr.p_double[j]; + grow = grow/xj; + j = j+jinc; + } + } } - if( *stage==3 ) + } + if( ae_fp_greater(grow*tscal,smlnum) ) + { + + /* + * Use the Level 2 BLAS solve if the reciprocal of the bound on + * elements of X is not too small. + */ + if( (upper&¬ran)||(!upper&&!notran) ) { - - /* - * START OF ITERATION. - * - * SET THE MINIMUM AND MAXIMUM STEPS TO CORRESPOND - * TO THE PRESENT INTERVAL OF UNCERTAINTY. - */ - if( state->brackt ) + if( nounit ) { - if( ae_fp_less(state->stx,state->sty) ) + vd = a->ptr.pp_double[n][n]; + } + else + { + vd = (double)(1); + } + x->ptr.p_double[n] = x->ptr.p_double[n]/vd; + for(i=n-1; i>=1; i--) + { + ip1 = i+1; + if( upper ) { - state->stmin = state->stx; - state->stmax = state->sty; + v = ae_v_dotproduct(&a->ptr.pp_double[i][ip1], 1, &x->ptr.p_double[ip1], 1, ae_v_len(ip1,n)); } else { - state->stmin = state->sty; - state->stmax = state->stx; + v = ae_v_dotproduct(&a->ptr.pp_double[ip1][i], a->stride, &x->ptr.p_double[ip1], 1, ae_v_len(ip1,n)); } + if( nounit ) + { + vd = a->ptr.pp_double[i][i]; + } + else + { + vd = (double)(1); + } + x->ptr.p_double[i] = (x->ptr.p_double[i]-v)/vd; } - else - { - state->stmin = state->stx; - state->stmax = *stp+state->xtrapf*(*stp-state->stx); - } - - /* - * FORCE THE STEP TO BE WITHIN THE BOUNDS STPMAX AND STPMIN. - */ - if( ae_fp_greater(*stp,stpmax) ) + } + else + { + if( nounit ) { - *stp = stpmax; + vd = a->ptr.pp_double[1][1]; } - if( ae_fp_less(*stp,linmin_stpmin) ) + else { - *stp = linmin_stpmin; + vd = (double)(1); } - - /* - * IF AN UNUSUAL TERMINATION IS TO OCCUR THEN LET - * STP BE THE LOWEST POINT OBTAINED SO FAR. 
- */ - if( (((state->brackt&&(ae_fp_less_eq(*stp,state->stmin)||ae_fp_greater_eq(*stp,state->stmax)))||*nfev>=linmin_maxfev-1)||state->infoc==0)||(state->brackt&&ae_fp_less_eq(state->stmax-state->stmin,linmin_xtol*state->stmax)) ) + x->ptr.p_double[1] = x->ptr.p_double[1]/vd; + for(i=2; i<=n; i++) { - *stp = state->stx; + im1 = i-1; + if( upper ) + { + v = ae_v_dotproduct(&a->ptr.pp_double[1][i], a->stride, &x->ptr.p_double[1], 1, ae_v_len(1,im1)); + } + else + { + v = ae_v_dotproduct(&a->ptr.pp_double[i][1], 1, &x->ptr.p_double[1], 1, ae_v_len(1,im1)); + } + if( nounit ) + { + vd = a->ptr.pp_double[i][i]; + } + else + { + vd = (double)(1); + } + x->ptr.p_double[i] = (x->ptr.p_double[i]-v)/vd; } + } + } + else + { + + /* + * Use a Level 1 BLAS solve, scaling intermediate results. + */ + if( ae_fp_greater(xmax,bignum) ) + { /* - * EVALUATE THE FUNCTION AND GRADIENT AT STP - * AND COMPUTE THE DIRECTIONAL DERIVATIVE. - */ - ae_v_move(&x->ptr.p_double[0], 1, &wa->ptr.p_double[0], 1, ae_v_len(0,n-1)); - ae_v_addd(&x->ptr.p_double[0], 1, &s->ptr.p_double[0], 1, ae_v_len(0,n-1), *stp); - - /* - * NEXT + * Scale X so that its components are less than or equal to + * BIGNUM in absolute value. */ - *stage = 4; - return; + *s = bignum/xmax; + ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), *s); + xmax = bignum; } - if( *stage==4 ) + if( notran ) { - *info = 0; - *nfev = *nfev+1; - v = ae_v_dotproduct(&g->ptr.p_double[0], 1, &s->ptr.p_double[0], 1, ae_v_len(0,n-1)); - state->dg = v; - state->ftest1 = state->finit+*stp*state->dgtest; /* - * TEST FOR CONVERGENCE. + * Solve A * x = b */ - if( (state->brackt&&(ae_fp_less_eq(*stp,state->stmin)||ae_fp_greater_eq(*stp,state->stmax)))||state->infoc==0 ) - { - *info = 6; - } - if( ((ae_fp_eq(*stp,stpmax)&&ae_fp_less(*f,state->finit))&&ae_fp_less_eq(*f,state->ftest1))&&ae_fp_less_eq(state->dg,state->dgtest) ) - { - *info = 5; - } - if( ae_fp_eq(*stp,linmin_stpmin)&&((ae_fp_greater_eq(*f,state->finit)||ae_fp_greater(*f,state->ftest1))||ae_fp_greater_eq(state->dg,state->dgtest)) ) - { - *info = 4; - } - if( *nfev>=linmin_maxfev ) - { - *info = 3; - } - if( state->brackt&&ae_fp_less_eq(state->stmax-state->stmin,linmin_xtol*state->stmax) ) - { - *info = 2; - } - if( (ae_fp_less(*f,state->finit)&&ae_fp_less_eq(*f,state->ftest1))&&ae_fp_less_eq(ae_fabs(state->dg, _state),-gtol*state->dginit) ) - { - *info = 1; - } - - /* - * CHECK FOR TERMINATION. - */ - if( *info!=0 ) + j = jfirst; + while((jinc>0&&j<=jlast)||(jinc<0&&j>=jlast)) { /* - * Check guarantees provided by the function for INFO=1 or INFO=5 + * Compute x(j) = b(j) / A(j,j), scaling x if necessary. */ - if( *info==1||*info==5 ) + xj = ae_fabs(x->ptr.p_double[j], _state); + flg = 0; + if( nounit ) { - v = 0.0; - for(i=0; i<=n-1; i++) + tjjs = a->ptr.pp_double[j][j]*tscal; + } + else + { + tjjs = tscal; + if( ae_fp_eq(tscal,(double)(1)) ) { - v = v+(wa->ptr.p_double[i]-x->ptr.p_double[i])*(wa->ptr.p_double[i]-x->ptr.p_double[i]); + flg = 100; } - if( ae_fp_greater_eq(*f,state->finit)||ae_fp_eq(v,0.0) ) + } + if( flg!=100 ) + { + tjj = ae_fabs(tjjs, _state); + if( ae_fp_greater(tjj,smlnum) ) { - *info = 6; + + /* + * abs(A(j,j)) > SMLNUM: + */ + if( ae_fp_less(tjj,(double)(1)) ) + { + if( ae_fp_greater(xj,tjj*bignum) ) + { + + /* + * Scale x by 1/b(j). 
+ */ + rec = 1/xj; + ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); + *s = *s*rec; + xmax = xmax*rec; + } + } + x->ptr.p_double[j] = x->ptr.p_double[j]/tjjs; + xj = ae_fabs(x->ptr.p_double[j], _state); + } + else + { + if( ae_fp_greater(tjj,(double)(0)) ) + { + + /* + * 0 < abs(A(j,j)) <= SMLNUM: + */ + if( ae_fp_greater(xj,tjj*bignum) ) + { + + /* + * Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM + * to avoid overflow when dividing by A(j,j). + */ + rec = tjj*bignum/xj; + if( ae_fp_greater(cnorm->ptr.p_double[j],(double)(1)) ) + { + + /* + * Scale by 1/CNORM(j) to avoid overflow when + * multiplying x(j) times column j. + */ + rec = rec/cnorm->ptr.p_double[j]; + } + ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); + *s = *s*rec; + xmax = xmax*rec; + } + x->ptr.p_double[j] = x->ptr.p_double[j]/tjjs; + xj = ae_fabs(x->ptr.p_double[j], _state); + } + else + { + + /* + * A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and + * scale = 0, and compute a solution to A*x = 0. + */ + for(i=1; i<=n; i++) + { + x->ptr.p_double[i] = (double)(0); + } + x->ptr.p_double[j] = (double)(1); + xj = (double)(1); + *s = (double)(0); + xmax = (double)(0); + } } } - *stage = 0; - return; - } - - /* - * IN THE FIRST STAGE WE SEEK A STEP FOR WHICH THE MODIFIED - * FUNCTION HAS A NONPOSITIVE VALUE AND NONNEGATIVE DERIVATIVE. - */ - if( (state->stage1&&ae_fp_less_eq(*f,state->ftest1))&&ae_fp_greater_eq(state->dg,ae_minreal(linmin_ftol, gtol, _state)*state->dginit) ) - { - state->stage1 = ae_false; - } - - /* - * A MODIFIED FUNCTION IS USED TO PREDICT THE STEP ONLY IF - * WE HAVE NOT OBTAINED A STEP FOR WHICH THE MODIFIED - * FUNCTION HAS A NONPOSITIVE FUNCTION VALUE AND NONNEGATIVE - * DERIVATIVE, AND IF A LOWER FUNCTION VALUE HAS BEEN - * OBTAINED BUT THE DECREASE IS NOT SUFFICIENT. - */ - if( (state->stage1&&ae_fp_less_eq(*f,state->fx))&&ae_fp_greater(*f,state->ftest1) ) - { - - /* - * DEFINE THE MODIFIED FUNCTION AND DERIVATIVE VALUES. - */ - state->fm = *f-*stp*state->dgtest; - state->fxm = state->fx-state->stx*state->dgtest; - state->fym = state->fy-state->sty*state->dgtest; - state->dgm = state->dg-state->dgtest; - state->dgxm = state->dgx-state->dgtest; - state->dgym = state->dgy-state->dgtest; - - /* - * CALL CSTEP TO UPDATE THE INTERVAL OF UNCERTAINTY - * AND TO COMPUTE THE NEW STEP. - */ - linmin_mcstep(&state->stx, &state->fxm, &state->dgxm, &state->sty, &state->fym, &state->dgym, stp, state->fm, state->dgm, &state->brackt, state->stmin, state->stmax, &state->infoc, _state); - - /* - * RESET THE FUNCTION AND GRADIENT VALUES FOR F. - */ - state->fx = state->fxm+state->stx*state->dgtest; - state->fy = state->fym+state->sty*state->dgtest; - state->dgx = state->dgxm+state->dgtest; - state->dgy = state->dgym+state->dgtest; - } - else - { /* - * CALL MCSTEP TO UPDATE THE INTERVAL OF UNCERTAINTY - * AND TO COMPUTE THE NEW STEP. + * Scale x if necessary to avoid overflow when adding a + * multiple of column j of A. */ - linmin_mcstep(&state->stx, &state->fx, &state->dgx, &state->sty, &state->fy, &state->dgy, stp, *f, state->dg, &state->brackt, state->stmin, state->stmax, &state->infoc, _state); - } - - /* - * FORCE A SUFFICIENT DECREASE IN THE SIZE OF THE - * INTERVAL OF UNCERTAINTY. - */ - if( state->brackt ) - { - if( ae_fp_greater_eq(ae_fabs(state->sty-state->stx, _state),p66*state->width1) ) + if( ae_fp_greater(xj,(double)(1)) ) { - *stp = state->stx+p5*(state->sty-state->stx); + rec = 1/xj; + if( ae_fp_greater(cnorm->ptr.p_double[j],(bignum-xmax)*rec) ) + { + + /* + * Scale x by 1/(2*abs(x(j))). 
+ */ + rec = rec*0.5; + ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); + *s = *s*rec; + } } - state->width1 = state->width; - state->width = ae_fabs(state->sty-state->stx, _state); + else + { + if( ae_fp_greater(xj*cnorm->ptr.p_double[j],bignum-xmax) ) + { + + /* + * Scale x by 1/2. + */ + ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), 0.5); + *s = *s*0.5; + } + } + if( upper ) + { + if( j>1 ) + { + + /* + * Compute the update + * x(1:j-1) := x(1:j-1) - x(j) * A(1:j-1,j) + */ + v = x->ptr.p_double[j]*tscal; + jm1 = j-1; + ae_v_subd(&x->ptr.p_double[1], 1, &a->ptr.pp_double[1][j], a->stride, ae_v_len(1,jm1), v); + i = 1; + for(k=2; k<=j-1; k++) + { + if( ae_fp_greater(ae_fabs(x->ptr.p_double[k], _state),ae_fabs(x->ptr.p_double[i], _state)) ) + { + i = k; + } + } + xmax = ae_fabs(x->ptr.p_double[i], _state); + } + } + else + { + if( jptr.p_double[j]*tscal; + ae_v_subd(&x->ptr.p_double[jp1], 1, &a->ptr.pp_double[jp1][j], a->stride, ae_v_len(jp1,n), v); + i = j+1; + for(k=j+2; k<=n; k++) + { + if( ae_fp_greater(ae_fabs(x->ptr.p_double[k], _state),ae_fabs(x->ptr.p_double[i], _state)) ) + { + i = k; + } + } + xmax = ae_fabs(x->ptr.p_double[i], _state); + } + } + j = j+jinc; } + } + else + { /* - * NEXT. + * Solve A' * x = b */ - *stage = 3; - continue; - } + j = jfirst; + while((jinc>0&&j<=jlast)||(jinc<0&&j>=jlast)) + { + + /* + * Compute x(j) = b(j) - sum A(k,j)*x(k). + * k<>j + */ + xj = ae_fabs(x->ptr.p_double[j], _state); + uscal = tscal; + rec = 1/ae_maxreal(xmax, (double)(1), _state); + if( ae_fp_greater(cnorm->ptr.p_double[j],(bignum-xj)*rec) ) + { + + /* + * If x(j) could overflow, scale x by 1/(2*XMAX). + */ + rec = rec*0.5; + if( nounit ) + { + tjjs = a->ptr.pp_double[j][j]*tscal; + } + else + { + tjjs = tscal; + } + tjj = ae_fabs(tjjs, _state); + if( ae_fp_greater(tjj,(double)(1)) ) + { + + /* + * Divide by A(j,j) when scaling x if A(j,j) > 1. + */ + rec = ae_minreal((double)(1), rec*tjj, _state); + uscal = uscal/tjjs; + } + if( ae_fp_less(rec,(double)(1)) ) + { + ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); + *s = *s*rec; + xmax = xmax*rec; + } + } + sumj = (double)(0); + if( ae_fp_eq(uscal,(double)(1)) ) + { + + /* + * If the scaling needed for A in the dot product is 1, + * call DDOT to perform the dot product. + */ + if( upper ) + { + if( j>1 ) + { + jm1 = j-1; + sumj = ae_v_dotproduct(&a->ptr.pp_double[1][j], a->stride, &x->ptr.p_double[1], 1, ae_v_len(1,jm1)); + } + else + { + sumj = (double)(0); + } + } + else + { + if( jptr.pp_double[jp1][j], a->stride, &x->ptr.p_double[jp1], 1, ae_v_len(jp1,n)); + } + } + } + else + { + + /* + * Otherwise, use in-line code for the dot product. + */ + if( upper ) + { + for(i=1; i<=j-1; i++) + { + v = a->ptr.pp_double[i][j]*uscal; + sumj = sumj+v*x->ptr.p_double[i]; + } + } + else + { + if( jptr.pp_double[i][j]*uscal; + sumj = sumj+v*x->ptr.p_double[i]; + } + } + } + } + if( ae_fp_eq(uscal,tscal) ) + { + + /* + * Compute x(j) := ( x(j) - sumj ) / A(j,j) if 1/A(j,j) + * was not used to scale the dotproduct. + */ + x->ptr.p_double[j] = x->ptr.p_double[j]-sumj; + xj = ae_fabs(x->ptr.p_double[j], _state); + flg = 0; + if( nounit ) + { + tjjs = a->ptr.pp_double[j][j]*tscal; + } + else + { + tjjs = tscal; + if( ae_fp_eq(tscal,(double)(1)) ) + { + flg = 150; + } + } + + /* + * Compute x(j) = x(j) / A(j,j), scaling if necessary. 
+ */ + if( flg!=150 ) + { + tjj = ae_fabs(tjjs, _state); + if( ae_fp_greater(tjj,smlnum) ) + { + + /* + * abs(A(j,j)) > SMLNUM: + */ + if( ae_fp_less(tjj,(double)(1)) ) + { + if( ae_fp_greater(xj,tjj*bignum) ) + { + + /* + * Scale X by 1/abs(x(j)). + */ + rec = 1/xj; + ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); + *s = *s*rec; + xmax = xmax*rec; + } + } + x->ptr.p_double[j] = x->ptr.p_double[j]/tjjs; + } + else + { + if( ae_fp_greater(tjj,(double)(0)) ) + { + + /* + * 0 < abs(A(j,j)) <= SMLNUM: + */ + if( ae_fp_greater(xj,tjj*bignum) ) + { + + /* + * Scale x by (1/abs(x(j)))*abs(A(j,j))*BIGNUM. + */ + rec = tjj*bignum/xj; + ae_v_muld(&x->ptr.p_double[1], 1, ae_v_len(1,n), rec); + *s = *s*rec; + xmax = xmax*rec; + } + x->ptr.p_double[j] = x->ptr.p_double[j]/tjjs; + } + else + { + + /* + * A(j,j) = 0: Set x(1:n) = 0, x(j) = 1, and + * scale = 0, and compute a solution to A'*x = 0. + */ + for(i=1; i<=n; i++) + { + x->ptr.p_double[i] = (double)(0); + } + x->ptr.p_double[j] = (double)(1); + *s = (double)(0); + xmax = (double)(0); + } + } + } + } + else + { + + /* + * Compute x(j) := x(j) / A(j,j) - sumj if the dot + * product has already been divided by 1/A(j,j). + */ + x->ptr.p_double[j] = x->ptr.p_double[j]/tjjs-sumj; + } + xmax = ae_maxreal(xmax, ae_fabs(x->ptr.p_double[j], _state), _state); + j = j+jinc; + } + } + *s = *s/tscal; + } + + /* + * Scale the column norms by 1/TSCAL for return. + */ + if( ae_fp_neq(tscal,(double)(1)) ) + { + v = 1/tscal; + ae_v_muld(&cnorm->ptr.p_double[1], 1, ae_v_len(1,n), v); + } +} + + +#endif +#if defined(AE_COMPILE_SAFESOLVE) || !defined(AE_PARTIAL_BUILD) + + +/************************************************************************* +Real implementation of CMatrixScaledTRSafeSolve + + -- ALGLIB routine -- + 21.01.2010 + Bochkanov Sergey +*************************************************************************/ +ae_bool rmatrixscaledtrsafesolve(/* Real */ ae_matrix* a, + double sa, + ae_int_t n, + /* Real */ ae_vector* x, + ae_bool isupper, + ae_int_t trans, + ae_bool isunit, + double maxgrowth, + ae_state *_state) +{ + ae_frame _frame_block; + double lnmax; + double nrmb; + double nrmx; + ae_int_t i; + ae_complex alpha; + ae_complex beta; + double vr; + ae_complex cx; + ae_vector tmp; + ae_bool result; + + ae_frame_make(_state, &_frame_block); + memset(&tmp, 0, sizeof(tmp)); + ae_vector_init(&tmp, 0, DT_REAL, _state, ae_true); + + ae_assert(n>0, "RMatrixTRSafeSolve: incorrect N!", _state); + ae_assert(trans==0||trans==1, "RMatrixTRSafeSolve: incorrect Trans!", _state); + result = ae_true; + lnmax = ae_log(ae_maxrealnumber, _state); + + /* + * Quick return if possible + */ + if( n<=0 ) + { + ae_frame_leave(_state); + return result; + } + + /* + * Load norms: right part and X + */ + nrmb = (double)(0); + for(i=0; i<=n-1; i++) + { + nrmb = ae_maxreal(nrmb, ae_fabs(x->ptr.p_double[i], _state), _state); + } + nrmx = (double)(0); + + /* + * Solve + */ + ae_vector_set_length(&tmp, n, _state); + result = ae_true; + if( isupper&&trans==0 ) + { + + /* + * U*x = b + */ + for(i=n-1; i>=0; i--) + { + + /* + * Task is reduced to alpha*x[i] = beta + */ + if( isunit ) + { + alpha = ae_complex_from_d(sa); + } + else + { + alpha = ae_complex_from_d(a->ptr.pp_double[i][i]*sa); + } + if( iptr.pp_double[i][i+1], 1, ae_v_len(i+1,n-1), sa); + vr = ae_v_dotproduct(&tmp.ptr.p_double[i+1], 1, &x->ptr.p_double[i+1], 1, ae_v_len(i+1,n-1)); + beta = ae_complex_from_d(x->ptr.p_double[i]-vr); + } + else + { + beta = ae_complex_from_d(x->ptr.p_double[i]); + } + + /* + * solve 
alpha*x[i] = beta + */ + result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &cx, _state); + if( !result ) + { + ae_frame_leave(_state); + return result; + } + x->ptr.p_double[i] = cx.x; + } + ae_frame_leave(_state); + return result; + } + if( !isupper&&trans==0 ) + { + + /* + * L*x = b + */ + for(i=0; i<=n-1; i++) + { + + /* + * Task is reduced to alpha*x[i] = beta + */ + if( isunit ) + { + alpha = ae_complex_from_d(sa); + } + else + { + alpha = ae_complex_from_d(a->ptr.pp_double[i][i]*sa); + } + if( i>0 ) + { + ae_v_moved(&tmp.ptr.p_double[0], 1, &a->ptr.pp_double[i][0], 1, ae_v_len(0,i-1), sa); + vr = ae_v_dotproduct(&tmp.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,i-1)); + beta = ae_complex_from_d(x->ptr.p_double[i]-vr); + } + else + { + beta = ae_complex_from_d(x->ptr.p_double[i]); + } + + /* + * solve alpha*x[i] = beta + */ + result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &cx, _state); + if( !result ) + { + ae_frame_leave(_state); + return result; + } + x->ptr.p_double[i] = cx.x; + } + ae_frame_leave(_state); + return result; + } + if( isupper&&trans==1 ) + { + + /* + * U^T*x = b + */ + for(i=0; i<=n-1; i++) + { + + /* + * Task is reduced to alpha*x[i] = beta + */ + if( isunit ) + { + alpha = ae_complex_from_d(sa); + } + else + { + alpha = ae_complex_from_d(a->ptr.pp_double[i][i]*sa); + } + beta = ae_complex_from_d(x->ptr.p_double[i]); + + /* + * solve alpha*x[i] = beta + */ + result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &cx, _state); + if( !result ) + { + ae_frame_leave(_state); + return result; + } + x->ptr.p_double[i] = cx.x; + + /* + * update the rest of right part + */ + if( iptr.pp_double[i][i+1], 1, ae_v_len(i+1,n-1), sa); + ae_v_subd(&x->ptr.p_double[i+1], 1, &tmp.ptr.p_double[i+1], 1, ae_v_len(i+1,n-1), vr); + } + } + ae_frame_leave(_state); + return result; + } + if( !isupper&&trans==1 ) + { + + /* + * L^T*x = b + */ + for(i=n-1; i>=0; i--) + { + + /* + * Task is reduced to alpha*x[i] = beta + */ + if( isunit ) + { + alpha = ae_complex_from_d(sa); + } + else + { + alpha = ae_complex_from_d(a->ptr.pp_double[i][i]*sa); + } + beta = ae_complex_from_d(x->ptr.p_double[i]); + + /* + * solve alpha*x[i] = beta + */ + result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &cx, _state); + if( !result ) + { + ae_frame_leave(_state); + return result; + } + x->ptr.p_double[i] = cx.x; + + /* + * update the rest of right part + */ + if( i>0 ) + { + vr = cx.x; + ae_v_moved(&tmp.ptr.p_double[0], 1, &a->ptr.pp_double[i][0], 1, ae_v_len(0,i-1), sa); + ae_v_subd(&x->ptr.p_double[0], 1, &tmp.ptr.p_double[0], 1, ae_v_len(0,i-1), vr); + } + } + ae_frame_leave(_state); + return result; + } + result = ae_false; + ae_frame_leave(_state); + return result; +} + + +/************************************************************************* +Internal subroutine for safe solution of + + SA*op(A)=b + +where A is NxN upper/lower triangular/unitriangular matrix, op(A) is +either identity transform, transposition or Hermitian transposition, SA is +a scaling factor such that max(|SA*A[i,j]|) is close to 1.0 in magnutude. + +This subroutine limits relative growth of solution (in inf-norm) by +MaxGrowth, returning False if growth exceeds MaxGrowth. Degenerate or +near-degenerate matrices are handled correctly (False is returned) as long +as MaxGrowth is significantly less than MaxRealNumber/norm(b). 
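The contract described above is met, in both the real and the complex variants, by reducing every elimination step to a scalar equation alpha*x[i] = beta and accepting the division only when it is provably safe. The stand-alone sketch below illustrates that guard for the real case; it is not the library routine itself (that role is played by safesolve_cbasicsolveandupdate further down), and the name safe_scalar_solve is illustrative only. The overflow test compares log|beta| - log|alpha| against ln(MaxRealNumber), and the growth test keeps the running inf-norm of x within maxgrowth times the inf-norm of b.

    #include <math.h>
    #include <float.h>

    /* Illustrative sketch, not the library code: solve alpha*x = beta for one
     * component, refusing the step if the division would overflow or if the
     * solution norm grows past maxgrowth*bnorm. Returns 1 on success, 0 on
     * failure (the caller is expected to abort the whole solve). */
    static int safe_scalar_solve(double alpha, double beta,
                                 double bnorm, double maxgrowth,
                                 double *xnorm, double *x)
    {
        double lnmax = log(DBL_MAX);

        *x = 0.0;
        if( alpha==0.0 )
            return 0;                         /* exactly singular pivot      */
        if( beta!=0.0 )
        {
            if( log(fabs(beta))-log(fabs(alpha)) > lnmax )
                return 0;                     /* x[i] would overflow         */
            *x = beta/alpha;
        }
        *xnorm = fmax(*xnorm, fabs(*x));      /* update inf-norm of solution */
        return *xnorm <= maxgrowth*bnorm;     /* reject excessive growth     */
    }

A back-substitution driver such as rmatrixscaledtrsafesolve above calls this kind of guard once per diagonal element and returns False the moment it fails, which is exactly how degenerate and near-degenerate matrices are reported.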
+ + -- ALGLIB routine -- + 21.01.2010 + Bochkanov Sergey +*************************************************************************/ +ae_bool cmatrixscaledtrsafesolve(/* Complex */ ae_matrix* a, + double sa, + ae_int_t n, + /* Complex */ ae_vector* x, + ae_bool isupper, + ae_int_t trans, + ae_bool isunit, + double maxgrowth, + ae_state *_state) +{ + ae_frame _frame_block; + double lnmax; + double nrmb; + double nrmx; + ae_int_t i; + ae_complex alpha; + ae_complex beta; + ae_complex vc; + ae_vector tmp; + ae_bool result; + + ae_frame_make(_state, &_frame_block); + memset(&tmp, 0, sizeof(tmp)); + ae_vector_init(&tmp, 0, DT_COMPLEX, _state, ae_true); + + ae_assert(n>0, "CMatrixTRSafeSolve: incorrect N!", _state); + ae_assert((trans==0||trans==1)||trans==2, "CMatrixTRSafeSolve: incorrect Trans!", _state); + result = ae_true; + lnmax = ae_log(ae_maxrealnumber, _state); + + /* + * Quick return if possible + */ + if( n<=0 ) + { + ae_frame_leave(_state); + return result; + } + + /* + * Load norms: right part and X + */ + nrmb = (double)(0); + for(i=0; i<=n-1; i++) + { + nrmb = ae_maxreal(nrmb, ae_c_abs(x->ptr.p_complex[i], _state), _state); + } + nrmx = (double)(0); + + /* + * Solve + */ + ae_vector_set_length(&tmp, n, _state); + result = ae_true; + if( isupper&&trans==0 ) + { + + /* + * U*x = b + */ + for(i=n-1; i>=0; i--) + { + + /* + * Task is reduced to alpha*x[i] = beta + */ + if( isunit ) + { + alpha = ae_complex_from_d(sa); + } + else + { + alpha = ae_c_mul_d(a->ptr.pp_complex[i][i],sa); + } + if( iptr.pp_complex[i][i+1], 1, "N", ae_v_len(i+1,n-1), sa); + vc = ae_v_cdotproduct(&tmp.ptr.p_complex[i+1], 1, "N", &x->ptr.p_complex[i+1], 1, "N", ae_v_len(i+1,n-1)); + beta = ae_c_sub(x->ptr.p_complex[i],vc); + } + else + { + beta = x->ptr.p_complex[i]; + } + + /* + * solve alpha*x[i] = beta + */ + result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); + if( !result ) + { + ae_frame_leave(_state); + return result; + } + x->ptr.p_complex[i] = vc; + } + ae_frame_leave(_state); + return result; + } + if( !isupper&&trans==0 ) + { + + /* + * L*x = b + */ + for(i=0; i<=n-1; i++) + { + + /* + * Task is reduced to alpha*x[i] = beta + */ + if( isunit ) + { + alpha = ae_complex_from_d(sa); + } + else + { + alpha = ae_c_mul_d(a->ptr.pp_complex[i][i],sa); + } + if( i>0 ) + { + ae_v_cmoved(&tmp.ptr.p_complex[0], 1, &a->ptr.pp_complex[i][0], 1, "N", ae_v_len(0,i-1), sa); + vc = ae_v_cdotproduct(&tmp.ptr.p_complex[0], 1, "N", &x->ptr.p_complex[0], 1, "N", ae_v_len(0,i-1)); + beta = ae_c_sub(x->ptr.p_complex[i],vc); + } + else + { + beta = x->ptr.p_complex[i]; + } + + /* + * solve alpha*x[i] = beta + */ + result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); + if( !result ) + { + ae_frame_leave(_state); + return result; + } + x->ptr.p_complex[i] = vc; + } + ae_frame_leave(_state); + return result; + } + if( isupper&&trans==1 ) + { + + /* + * U^T*x = b + */ + for(i=0; i<=n-1; i++) + { + + /* + * Task is reduced to alpha*x[i] = beta + */ + if( isunit ) + { + alpha = ae_complex_from_d(sa); + } + else + { + alpha = ae_c_mul_d(a->ptr.pp_complex[i][i],sa); + } + beta = x->ptr.p_complex[i]; + + /* + * solve alpha*x[i] = beta + */ + result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); + if( !result ) + { + ae_frame_leave(_state); + return result; + } + x->ptr.p_complex[i] = vc; + + /* + * update the rest of right part + */ + if( iptr.pp_complex[i][i+1], 1, "N", ae_v_len(i+1,n-1), sa); + 
ae_v_csubc(&x->ptr.p_complex[i+1], 1, &tmp.ptr.p_complex[i+1], 1, "N", ae_v_len(i+1,n-1), vc); + } + } + ae_frame_leave(_state); + return result; + } + if( !isupper&&trans==1 ) + { + + /* + * L^T*x = b + */ + for(i=n-1; i>=0; i--) + { + + /* + * Task is reduced to alpha*x[i] = beta + */ + if( isunit ) + { + alpha = ae_complex_from_d(sa); + } + else + { + alpha = ae_c_mul_d(a->ptr.pp_complex[i][i],sa); + } + beta = x->ptr.p_complex[i]; + + /* + * solve alpha*x[i] = beta + */ + result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); + if( !result ) + { + ae_frame_leave(_state); + return result; + } + x->ptr.p_complex[i] = vc; + + /* + * update the rest of right part + */ + if( i>0 ) + { + ae_v_cmoved(&tmp.ptr.p_complex[0], 1, &a->ptr.pp_complex[i][0], 1, "N", ae_v_len(0,i-1), sa); + ae_v_csubc(&x->ptr.p_complex[0], 1, &tmp.ptr.p_complex[0], 1, "N", ae_v_len(0,i-1), vc); + } + } + ae_frame_leave(_state); + return result; + } + if( isupper&&trans==2 ) + { + + /* + * U^H*x = b + */ + for(i=0; i<=n-1; i++) + { + + /* + * Task is reduced to alpha*x[i] = beta + */ + if( isunit ) + { + alpha = ae_complex_from_d(sa); + } + else + { + alpha = ae_c_mul_d(ae_c_conj(a->ptr.pp_complex[i][i], _state),sa); + } + beta = x->ptr.p_complex[i]; + + /* + * solve alpha*x[i] = beta + */ + result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); + if( !result ) + { + ae_frame_leave(_state); + return result; + } + x->ptr.p_complex[i] = vc; + + /* + * update the rest of right part + */ + if( iptr.pp_complex[i][i+1], 1, "Conj", ae_v_len(i+1,n-1), sa); + ae_v_csubc(&x->ptr.p_complex[i+1], 1, &tmp.ptr.p_complex[i+1], 1, "N", ae_v_len(i+1,n-1), vc); + } + } + ae_frame_leave(_state); + return result; + } + if( !isupper&&trans==2 ) + { + + /* + * L^T*x = b + */ + for(i=n-1; i>=0; i--) + { + + /* + * Task is reduced to alpha*x[i] = beta + */ + if( isunit ) + { + alpha = ae_complex_from_d(sa); + } + else + { + alpha = ae_c_mul_d(ae_c_conj(a->ptr.pp_complex[i][i], _state),sa); + } + beta = x->ptr.p_complex[i]; + + /* + * solve alpha*x[i] = beta + */ + result = safesolve_cbasicsolveandupdate(alpha, beta, lnmax, nrmb, maxgrowth, &nrmx, &vc, _state); + if( !result ) + { + ae_frame_leave(_state); + return result; + } + x->ptr.p_complex[i] = vc; + + /* + * update the rest of right part + */ + if( i>0 ) + { + ae_v_cmoved(&tmp.ptr.p_complex[0], 1, &a->ptr.pp_complex[i][0], 1, "Conj", ae_v_len(0,i-1), sa); + ae_v_csubc(&x->ptr.p_complex[0], 1, &tmp.ptr.p_complex[0], 1, "N", ae_v_len(0,i-1), vc); + } + } + ae_frame_leave(_state); + return result; + } + result = ae_false; + ae_frame_leave(_state); + return result; +} + + +/************************************************************************* +complex basic solver-updater for reduced linear system + + alpha*x[i] = beta + +solves this equation and updates it in overlfow-safe manner (keeping track +of relative growth of solution). + +Parameters: + Alpha - alpha + Beta - beta + LnMax - precomputed Ln(MaxRealNumber) + BNorm - inf-norm of b (right part of original system) + MaxGrowth- maximum growth of norm(x) relative to norm(b) + XNorm - inf-norm of other components of X (which are already processed) + it is updated by CBasicSolveAndUpdate. 
+ X - solution + + -- ALGLIB routine -- + 26.01.2009 + Bochkanov Sergey +*************************************************************************/ +static ae_bool safesolve_cbasicsolveandupdate(ae_complex alpha, + ae_complex beta, + double lnmax, + double bnorm, + double maxgrowth, + double* xnorm, + ae_complex* x, + ae_state *_state) +{ + double v; + ae_bool result; + + x->x = 0; + x->y = 0; + + result = ae_false; + if( ae_c_eq_d(alpha,(double)(0)) ) + { + return result; + } + if( ae_c_neq_d(beta,(double)(0)) ) + { + + /* + * alpha*x[i]=beta + */ + v = ae_log(ae_c_abs(beta, _state), _state)-ae_log(ae_c_abs(alpha, _state), _state); + if( ae_fp_greater(v,lnmax) ) + { + return result; + } + *x = ae_c_div(beta,alpha); + } + else + { + + /* + * alpha*x[i]=0 + */ + *x = ae_complex_from_i(0); + } + + /* + * update NrmX, test growth limit + */ + *xnorm = ae_maxreal(*xnorm, ae_c_abs(*x, _state), _state); + if( ae_fp_greater(*xnorm,maxgrowth*bnorm) ) + { + return result; + } + result = ae_true; + return result; +} + + +#endif +#if defined(AE_COMPILE_HBLAS) || !defined(AE_PARTIAL_BUILD) + + +void hermitianmatrixvectormultiply(/* Complex */ ae_matrix* a, + ae_bool isupper, + ae_int_t i1, + ae_int_t i2, + /* Complex */ ae_vector* x, + ae_complex alpha, + /* Complex */ ae_vector* y, + ae_state *_state) +{ + ae_int_t i; + ae_int_t ba1; + ae_int_t by1; + ae_int_t by2; + ae_int_t bx1; + ae_int_t bx2; + ae_int_t n; + ae_complex v; + + + n = i2-i1+1; + if( n<=0 ) + { + return; + } + + /* + * Let A = L + D + U, where + * L is strictly lower triangular (main diagonal is zero) + * D is diagonal + * U is strictly upper triangular (main diagonal is zero) + * + * A*x = L*x + D*x + U*x + * + * Calculate D*x first + */ + for(i=i1; i<=i2; i++) + { + y->ptr.p_complex[i-i1+1] = ae_c_mul(a->ptr.pp_complex[i][i],x->ptr.p_complex[i-i1+1]); + } + + /* + * Add L*x + U*x + */ + if( isupper ) + { + for(i=i1; i<=i2-1; i++) + { + + /* + * Add L*x to the result + */ + v = x->ptr.p_complex[i-i1+1]; + by1 = i-i1+2; + by2 = n; + ba1 = i+1; + ae_v_caddc(&y->ptr.p_complex[by1], 1, &a->ptr.pp_complex[i][ba1], 1, "Conj", ae_v_len(by1,by2), v); + + /* + * Add U*x to the result + */ + bx1 = i-i1+2; + bx2 = n; + ba1 = i+1; + v = ae_v_cdotproduct(&x->ptr.p_complex[bx1], 1, "N", &a->ptr.pp_complex[i][ba1], 1, "N", ae_v_len(bx1,bx2)); + y->ptr.p_complex[i-i1+1] = ae_c_add(y->ptr.p_complex[i-i1+1],v); + } + } + else + { + for(i=i1+1; i<=i2; i++) + { + + /* + * Add L*x to the result + */ + bx1 = 1; + bx2 = i-i1; + ba1 = i1; + v = ae_v_cdotproduct(&x->ptr.p_complex[bx1], 1, "N", &a->ptr.pp_complex[i][ba1], 1, "N", ae_v_len(bx1,bx2)); + y->ptr.p_complex[i-i1+1] = ae_c_add(y->ptr.p_complex[i-i1+1],v); + + /* + * Add U*x to the result + */ + v = x->ptr.p_complex[i-i1+1]; + by1 = 1; + by2 = i-i1; + ba1 = i1; + ae_v_caddc(&y->ptr.p_complex[by1], 1, &a->ptr.pp_complex[i][ba1], 1, "Conj", ae_v_len(by1,by2), v); + } + } + ae_v_cmulc(&y->ptr.p_complex[1], 1, ae_v_len(1,n), alpha); +} + + +void hermitianrank2update(/* Complex */ ae_matrix* a, + ae_bool isupper, + ae_int_t i1, + ae_int_t i2, + /* Complex */ ae_vector* x, + /* Complex */ ae_vector* y, + /* Complex */ ae_vector* t, + ae_complex alpha, + ae_state *_state) +{ + ae_int_t i; + ae_int_t tp1; + ae_int_t tp2; + ae_complex v; + + + if( isupper ) + { + for(i=i1; i<=i2; i++) + { + tp1 = i+1-i1; + tp2 = i2-i1+1; + v = ae_c_mul(alpha,x->ptr.p_complex[i+1-i1]); + ae_v_cmovec(&t->ptr.p_complex[tp1], 1, &y->ptr.p_complex[tp1], 1, "Conj", ae_v_len(tp1,tp2), v); + v = ae_c_mul(ae_c_conj(alpha, 
_state),y->ptr.p_complex[i+1-i1]); + ae_v_caddc(&t->ptr.p_complex[tp1], 1, &x->ptr.p_complex[tp1], 1, "Conj", ae_v_len(tp1,tp2), v); + ae_v_cadd(&a->ptr.pp_complex[i][i], 1, &t->ptr.p_complex[tp1], 1, "N", ae_v_len(i,i2)); + } + } + else + { + for(i=i1; i<=i2; i++) + { + tp1 = 1; + tp2 = i+1-i1; + v = ae_c_mul(alpha,x->ptr.p_complex[i+1-i1]); + ae_v_cmovec(&t->ptr.p_complex[tp1], 1, &y->ptr.p_complex[tp1], 1, "Conj", ae_v_len(tp1,tp2), v); + v = ae_c_mul(ae_c_conj(alpha, _state),y->ptr.p_complex[i+1-i1]); + ae_v_caddc(&t->ptr.p_complex[tp1], 1, &x->ptr.p_complex[tp1], 1, "Conj", ae_v_len(tp1,tp2), v); + ae_v_cadd(&a->ptr.pp_complex[i][i1], 1, &t->ptr.p_complex[tp1], 1, "N", ae_v_len(i1,i)); + } + } +} + + +#endif +#if defined(AE_COMPILE_SBLAS) || !defined(AE_PARTIAL_BUILD) + + +void symmetricmatrixvectormultiply(/* Real */ ae_matrix* a, + ae_bool isupper, + ae_int_t i1, + ae_int_t i2, + /* Real */ ae_vector* x, + double alpha, + /* Real */ ae_vector* y, + ae_state *_state) +{ + ae_int_t i; + ae_int_t ba1; + ae_int_t ba2; + ae_int_t by1; + ae_int_t by2; + ae_int_t bx1; + ae_int_t bx2; + ae_int_t n; + double v; + + + n = i2-i1+1; + if( n<=0 ) + { + return; + } + + /* + * Let A = L + D + U, where + * L is strictly lower triangular (main diagonal is zero) + * D is diagonal + * U is strictly upper triangular (main diagonal is zero) + * + * A*x = L*x + D*x + U*x + * + * Calculate D*x first + */ + for(i=i1; i<=i2; i++) + { + y->ptr.p_double[i-i1+1] = a->ptr.pp_double[i][i]*x->ptr.p_double[i-i1+1]; + } + + /* + * Add L*x + U*x + */ + if( isupper ) + { + for(i=i1; i<=i2-1; i++) + { + + /* + * Add L*x to the result + */ + v = x->ptr.p_double[i-i1+1]; + by1 = i-i1+2; + by2 = n; + ba1 = i+1; + ba2 = i2; + ae_v_addd(&y->ptr.p_double[by1], 1, &a->ptr.pp_double[i][ba1], 1, ae_v_len(by1,by2), v); + + /* + * Add U*x to the result + */ + bx1 = i-i1+2; + bx2 = n; + ba1 = i+1; + ba2 = i2; + v = ae_v_dotproduct(&x->ptr.p_double[bx1], 1, &a->ptr.pp_double[i][ba1], 1, ae_v_len(bx1,bx2)); + y->ptr.p_double[i-i1+1] = y->ptr.p_double[i-i1+1]+v; + } + } + else + { + for(i=i1+1; i<=i2; i++) + { + + /* + * Add L*x to the result + */ + bx1 = 1; + bx2 = i-i1; + ba1 = i1; + ba2 = i-1; + v = ae_v_dotproduct(&x->ptr.p_double[bx1], 1, &a->ptr.pp_double[i][ba1], 1, ae_v_len(bx1,bx2)); + y->ptr.p_double[i-i1+1] = y->ptr.p_double[i-i1+1]+v; + + /* + * Add U*x to the result + */ + v = x->ptr.p_double[i-i1+1]; + by1 = 1; + by2 = i-i1; + ba1 = i1; + ba2 = i-1; + ae_v_addd(&y->ptr.p_double[by1], 1, &a->ptr.pp_double[i][ba1], 1, ae_v_len(by1,by2), v); + } + } + ae_v_muld(&y->ptr.p_double[1], 1, ae_v_len(1,n), alpha); + touchint(&ba2, _state); +} + + +void symmetricrank2update(/* Real */ ae_matrix* a, + ae_bool isupper, + ae_int_t i1, + ae_int_t i2, + /* Real */ ae_vector* x, + /* Real */ ae_vector* y, + /* Real */ ae_vector* t, + double alpha, + ae_state *_state) +{ + ae_int_t i; + ae_int_t tp1; + ae_int_t tp2; + double v; + + + if( isupper ) + { + for(i=i1; i<=i2; i++) + { + tp1 = i+1-i1; + tp2 = i2-i1+1; + v = x->ptr.p_double[i+1-i1]; + ae_v_moved(&t->ptr.p_double[tp1], 1, &y->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), v); + v = y->ptr.p_double[i+1-i1]; + ae_v_addd(&t->ptr.p_double[tp1], 1, &x->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), v); + ae_v_muld(&t->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), alpha); + ae_v_add(&a->ptr.pp_double[i][i], 1, &t->ptr.p_double[tp1], 1, ae_v_len(i,i2)); + } + } + else + { + for(i=i1; i<=i2; i++) + { + tp1 = 1; + tp2 = i+1-i1; + v = x->ptr.p_double[i+1-i1]; + ae_v_moved(&t->ptr.p_double[tp1], 1, 
&y->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), v); + v = y->ptr.p_double[i+1-i1]; + ae_v_addd(&t->ptr.p_double[tp1], 1, &x->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), v); + ae_v_muld(&t->ptr.p_double[tp1], 1, ae_v_len(tp1,tp2), alpha); + ae_v_add(&a->ptr.pp_double[i][i1], 1, &t->ptr.p_double[tp1], 1, ae_v_len(i1,i)); + } + } +} + + +#endif +#if defined(AE_COMPILE_BLAS) || !defined(AE_PARTIAL_BUILD) + + +double vectornorm2(/* Real */ ae_vector* x, + ae_int_t i1, + ae_int_t i2, + ae_state *_state) +{ + ae_int_t n; + ae_int_t ix; + double absxi; + double scl; + double ssq; + double result; + + + n = i2-i1+1; + if( n<1 ) + { + result = (double)(0); + return result; + } + if( n==1 ) + { + result = ae_fabs(x->ptr.p_double[i1], _state); + return result; + } + scl = (double)(0); + ssq = (double)(1); + for(ix=i1; ix<=i2; ix++) + { + if( ae_fp_neq(x->ptr.p_double[ix],(double)(0)) ) + { + absxi = ae_fabs(x->ptr.p_double[ix], _state); + if( ae_fp_less(scl,absxi) ) + { + ssq = 1+ssq*ae_sqr(scl/absxi, _state); + scl = absxi; + } + else + { + ssq = ssq+ae_sqr(absxi/scl, _state); + } + } + } + result = scl*ae_sqrt(ssq, _state); + return result; +} + + +ae_int_t vectoridxabsmax(/* Real */ ae_vector* x, + ae_int_t i1, + ae_int_t i2, + ae_state *_state) +{ + ae_int_t i; + ae_int_t result; + + + result = i1; + for(i=i1+1; i<=i2; i++) + { + if( ae_fp_greater(ae_fabs(x->ptr.p_double[i], _state),ae_fabs(x->ptr.p_double[result], _state)) ) + { + result = i; + } + } + return result; +} + + +ae_int_t columnidxabsmax(/* Real */ ae_matrix* x, + ae_int_t i1, + ae_int_t i2, + ae_int_t j, + ae_state *_state) +{ + ae_int_t i; + ae_int_t result; + + + result = i1; + for(i=i1+1; i<=i2; i++) + { + if( ae_fp_greater(ae_fabs(x->ptr.pp_double[i][j], _state),ae_fabs(x->ptr.pp_double[result][j], _state)) ) + { + result = i; + } + } + return result; +} + + +ae_int_t rowidxabsmax(/* Real */ ae_matrix* x, + ae_int_t j1, + ae_int_t j2, + ae_int_t i, + ae_state *_state) +{ + ae_int_t j; + ae_int_t result; + + + result = j1; + for(j=j1+1; j<=j2; j++) + { + if( ae_fp_greater(ae_fabs(x->ptr.pp_double[i][j], _state),ae_fabs(x->ptr.pp_double[i][result], _state)) ) + { + result = j; + } + } + return result; +} + + +double upperhessenberg1norm(/* Real */ ae_matrix* a, + ae_int_t i1, + ae_int_t i2, + ae_int_t j1, + ae_int_t j2, + /* Real */ ae_vector* work, + ae_state *_state) +{ + ae_int_t i; + ae_int_t j; + double result; + + + ae_assert(i2-i1==j2-j1, "UpperHessenberg1Norm: I2-I1<>J2-J1!", _state); + for(j=j1; j<=j2; j++) + { + work->ptr.p_double[j] = (double)(0); + } + for(i=i1; i<=i2; i++) + { + for(j=ae_maxint(j1, j1+i-i1-1, _state); j<=j2; j++) + { + work->ptr.p_double[j] = work->ptr.p_double[j]+ae_fabs(a->ptr.pp_double[i][j], _state); + } + } + result = (double)(0); + for(j=j1; j<=j2; j++) + { + result = ae_maxreal(result, work->ptr.p_double[j], _state); + } + return result; +} + + +void copymatrix(/* Real */ ae_matrix* a, + ae_int_t is1, + ae_int_t is2, + ae_int_t js1, + ae_int_t js2, + /* Real */ ae_matrix* b, + ae_int_t id1, + ae_int_t id2, + ae_int_t jd1, + ae_int_t jd2, + ae_state *_state) +{ + ae_int_t isrc; + ae_int_t idst; + + + if( is1>is2||js1>js2 ) + { + return; + } + ae_assert(is2-is1==id2-id1, "CopyMatrix: different sizes!", _state); + ae_assert(js2-js1==jd2-jd1, "CopyMatrix: different sizes!", _state); + for(isrc=is1; isrc<=is2; isrc++) + { + idst = isrc-is1+id1; + ae_v_move(&b->ptr.pp_double[idst][jd1], 1, &a->ptr.pp_double[isrc][js1], 1, ae_v_len(jd1,jd2)); + } +} + + +void inplacetranspose(/* Real */ ae_matrix* a, + 
ae_int_t i1, + ae_int_t i2, + ae_int_t j1, + ae_int_t j2, + /* Real */ ae_vector* work, + ae_state *_state) +{ + ae_int_t i; + ae_int_t j; + ae_int_t ips; + ae_int_t jps; + ae_int_t l; + + + if( i1>i2||j1>j2 ) + { + return; + } + ae_assert(i1-i2==j1-j2, "InplaceTranspose error: incorrect array size!", _state); + for(i=i1; i<=i2-1; i++) + { + j = j1+i-i1; + ips = i+1; + jps = j1+ips-i1; + l = i2-i; + ae_v_move(&work->ptr.p_double[1], 1, &a->ptr.pp_double[ips][j], a->stride, ae_v_len(1,l)); + ae_v_move(&a->ptr.pp_double[ips][j], a->stride, &a->ptr.pp_double[i][jps], 1, ae_v_len(ips,i2)); + ae_v_move(&a->ptr.pp_double[i][jps], 1, &work->ptr.p_double[1], 1, ae_v_len(jps,j2)); + } +} + + +void copyandtranspose(/* Real */ ae_matrix* a, + ae_int_t is1, + ae_int_t is2, + ae_int_t js1, + ae_int_t js2, + /* Real */ ae_matrix* b, + ae_int_t id1, + ae_int_t id2, + ae_int_t jd1, + ae_int_t jd2, + ae_state *_state) +{ + ae_int_t isrc; + ae_int_t jdst; + + + if( is1>is2||js1>js2 ) + { + return; + } + ae_assert(is2-is1==jd2-jd1, "CopyAndTranspose: different sizes!", _state); + ae_assert(js2-js1==id2-id1, "CopyAndTranspose: different sizes!", _state); + for(isrc=is1; isrc<=is2; isrc++) + { + jdst = isrc-is1+jd1; + ae_v_move(&b->ptr.pp_double[id1][jdst], b->stride, &a->ptr.pp_double[isrc][js1], 1, ae_v_len(id1,id2)); + } +} + + +void matrixvectormultiply(/* Real */ ae_matrix* a, + ae_int_t i1, + ae_int_t i2, + ae_int_t j1, + ae_int_t j2, + ae_bool trans, + /* Real */ ae_vector* x, + ae_int_t ix1, + ae_int_t ix2, + double alpha, + /* Real */ ae_vector* y, + ae_int_t iy1, + ae_int_t iy2, + double beta, + ae_state *_state) +{ + ae_int_t i; + double v; + + + if( !trans ) + { + + /* + * y := alpha*A*x + beta*y; + */ + if( i1>i2||j1>j2 ) + { + return; + } + ae_assert(j2-j1==ix2-ix1, "MatrixVectorMultiply: A and X dont match!", _state); + ae_assert(i2-i1==iy2-iy1, "MatrixVectorMultiply: A and Y dont match!", _state); + + /* + * beta*y + */ + if( ae_fp_eq(beta,(double)(0)) ) + { + for(i=iy1; i<=iy2; i++) + { + y->ptr.p_double[i] = (double)(0); + } + } + else + { + ae_v_muld(&y->ptr.p_double[iy1], 1, ae_v_len(iy1,iy2), beta); + } + + /* + * alpha*A*x + */ + for(i=i1; i<=i2; i++) + { + v = ae_v_dotproduct(&a->ptr.pp_double[i][j1], 1, &x->ptr.p_double[ix1], 1, ae_v_len(j1,j2)); + y->ptr.p_double[iy1+i-i1] = y->ptr.p_double[iy1+i-i1]+alpha*v; + } + } + else + { + + /* + * y := alpha*A'*x + beta*y; + */ + if( i1>i2||j1>j2 ) + { + return; + } + ae_assert(i2-i1==ix2-ix1, "MatrixVectorMultiply: A and X dont match!", _state); + ae_assert(j2-j1==iy2-iy1, "MatrixVectorMultiply: A and Y dont match!", _state); + + /* + * beta*y + */ + if( ae_fp_eq(beta,(double)(0)) ) + { + for(i=iy1; i<=iy2; i++) + { + y->ptr.p_double[i] = (double)(0); + } + } + else + { + ae_v_muld(&y->ptr.p_double[iy1], 1, ae_v_len(iy1,iy2), beta); + } + + /* + * alpha*A'*x + */ + for(i=i1; i<=i2; i++) + { + v = alpha*x->ptr.p_double[ix1+i-i1]; + ae_v_addd(&y->ptr.p_double[iy1], 1, &a->ptr.pp_double[i][j1], 1, ae_v_len(iy1,iy2), v); + } + } +} + + +double pythag2(double x, double y, ae_state *_state) +{ + double w; + double xabs; + double yabs; + double z; + double result; + + + xabs = ae_fabs(x, _state); + yabs = ae_fabs(y, _state); + w = ae_maxreal(xabs, yabs, _state); + z = ae_minreal(xabs, yabs, _state); + if( ae_fp_eq(z,(double)(0)) ) + { + result = w; + } + else + { + result = w*ae_sqrt(1+ae_sqr(z/w, _state), _state); + } + return result; +} + + +void matrixmatrixmultiply(/* Real */ ae_matrix* a, + ae_int_t ai1, + ae_int_t ai2, + ae_int_t aj1, + 
ae_int_t aj2, + ae_bool transa, + /* Real */ ae_matrix* b, + ae_int_t bi1, + ae_int_t bi2, + ae_int_t bj1, + ae_int_t bj2, + ae_bool transb, + double alpha, + /* Real */ ae_matrix* c, + ae_int_t ci1, + ae_int_t ci2, + ae_int_t cj1, + ae_int_t cj2, + double beta, + /* Real */ ae_vector* work, + ae_state *_state) +{ + ae_int_t arows; + ae_int_t acols; + ae_int_t brows; + ae_int_t bcols; + ae_int_t crows; + ae_int_t i; + ae_int_t j; + ae_int_t k; + ae_int_t l; + ae_int_t r; + double v; + + + + /* + * Setup + */ + if( !transa ) + { + arows = ai2-ai1+1; + acols = aj2-aj1+1; + } + else + { + arows = aj2-aj1+1; + acols = ai2-ai1+1; + } + if( !transb ) + { + brows = bi2-bi1+1; + bcols = bj2-bj1+1; + } + else + { + brows = bj2-bj1+1; + bcols = bi2-bi1+1; + } + ae_assert(acols==brows, "MatrixMatrixMultiply: incorrect matrix sizes!", _state); + if( ((arows<=0||acols<=0)||brows<=0)||bcols<=0 ) + { + return; + } + crows = arows; + + /* + * Test WORK + */ + i = ae_maxint(arows, acols, _state); + i = ae_maxint(brows, i, _state); + i = ae_maxint(i, bcols, _state); + work->ptr.p_double[1] = (double)(0); + work->ptr.p_double[i] = (double)(0); + + /* + * Prepare C + */ + if( ae_fp_eq(beta,(double)(0)) ) + { + for(i=ci1; i<=ci2; i++) + { + for(j=cj1; j<=cj2; j++) + { + c->ptr.pp_double[i][j] = (double)(0); + } + } + } + else + { + for(i=ci1; i<=ci2; i++) + { + ae_v_muld(&c->ptr.pp_double[i][cj1], 1, ae_v_len(cj1,cj2), beta); + } + } + + /* + * A*B + */ + if( !transa&&!transb ) + { + for(l=ai1; l<=ai2; l++) + { + for(r=bi1; r<=bi2; r++) + { + v = alpha*a->ptr.pp_double[l][aj1+r-bi1]; + k = ci1+l-ai1; + ae_v_addd(&c->ptr.pp_double[k][cj1], 1, &b->ptr.pp_double[r][bj1], 1, ae_v_len(cj1,cj2), v); + } + } + return; + } + + /* + * A*B' + */ + if( !transa&&transb ) + { + if( arows*acolsptr.pp_double[l][aj1], 1, &b->ptr.pp_double[r][bj1], 1, ae_v_len(aj1,aj2)); + c->ptr.pp_double[ci1+l-ai1][cj1+r-bi1] = c->ptr.pp_double[ci1+l-ai1][cj1+r-bi1]+alpha*v; + } + } + return; + } + else + { + for(l=ai1; l<=ai2; l++) + { + for(r=bi1; r<=bi2; r++) + { + v = ae_v_dotproduct(&a->ptr.pp_double[l][aj1], 1, &b->ptr.pp_double[r][bj1], 1, ae_v_len(aj1,aj2)); + c->ptr.pp_double[ci1+l-ai1][cj1+r-bi1] = c->ptr.pp_double[ci1+l-ai1][cj1+r-bi1]+alpha*v; + } + } + return; + } + } + + /* + * A'*B + */ + if( transa&&!transb ) + { + for(l=aj1; l<=aj2; l++) + { + for(r=bi1; r<=bi2; r++) + { + v = alpha*a->ptr.pp_double[ai1+r-bi1][l]; + k = ci1+l-aj1; + ae_v_addd(&c->ptr.pp_double[k][cj1], 1, &b->ptr.pp_double[r][bj1], 1, ae_v_len(cj1,cj2), v); + } + } + return; + } + + /* + * A'*B' + */ + if( transa&&transb ) + { + if( arows*acolsptr.p_double[i] = 0.0; + } + for(l=ai1; l<=ai2; l++) + { + v = alpha*b->ptr.pp_double[r][bj1+l-ai1]; + ae_v_addd(&work->ptr.p_double[1], 1, &a->ptr.pp_double[l][aj1], 1, ae_v_len(1,crows), v); + } + ae_v_add(&c->ptr.pp_double[ci1][k], c->stride, &work->ptr.p_double[1], 1, ae_v_len(ci1,ci2)); + } + return; + } + else + { + for(l=aj1; l<=aj2; l++) + { + k = ai2-ai1+1; + ae_v_move(&work->ptr.p_double[1], 1, &a->ptr.pp_double[ai1][l], a->stride, ae_v_len(1,k)); + for(r=bi1; r<=bi2; r++) + { + v = ae_v_dotproduct(&work->ptr.p_double[1], 1, &b->ptr.pp_double[r][bj1], 1, ae_v_len(1,k)); + c->ptr.pp_double[ci1+l-aj1][cj1+r-bi1] = c->ptr.pp_double[ci1+l-aj1][cj1+r-bi1]+alpha*v; + } + } + return; + } + } +} + + +#endif +#if defined(AE_COMPILE_LINMIN) || !defined(AE_PARTIAL_BUILD) + + +/************************************************************************* +Normalizes direction/step pair: makes |D|=1, scales Stp. 
+If |D|=0, it returns, leavind D/Stp unchanged. + + -- ALGLIB -- + Copyright 01.04.2010 by Bochkanov Sergey +*************************************************************************/ +void linminnormalized(/* Real */ ae_vector* d, + double* stp, + ae_int_t n, + ae_state *_state) +{ + double mx; + double s; + ae_int_t i; + + + + /* + * first, scale D to avoid underflow/overflow durng squaring + */ + mx = (double)(0); + for(i=0; i<=n-1; i++) + { + mx = ae_maxreal(mx, ae_fabs(d->ptr.p_double[i], _state), _state); + } + if( ae_fp_eq(mx,(double)(0)) ) + { + return; + } + s = 1/mx; + ae_v_muld(&d->ptr.p_double[0], 1, ae_v_len(0,n-1), s); + *stp = *stp/s; + + /* + * normalize D + */ + s = ae_v_dotproduct(&d->ptr.p_double[0], 1, &d->ptr.p_double[0], 1, ae_v_len(0,n-1)); + s = 1/ae_sqrt(s, _state); + ae_v_muld(&d->ptr.p_double[0], 1, ae_v_len(0,n-1), s); + *stp = *stp/s; +} + + +/************************************************************************* +THE PURPOSE OF MCSRCH IS TO FIND A STEP WHICH SATISFIES A SUFFICIENT +DECREASE CONDITION AND A CURVATURE CONDITION. + +AT EACH STAGE THE SUBROUTINE UPDATES AN INTERVAL OF UNCERTAINTY WITH +ENDPOINTS STX AND STY. THE INTERVAL OF UNCERTAINTY IS INITIALLY CHOSEN +SO THAT IT CONTAINS A MINIMIZER OF THE MODIFIED FUNCTION + + F(X+STP*S) - F(X) - FTOL*STP*(GRADF(X)'S). + +IF A STEP IS OBTAINED FOR WHICH THE MODIFIED FUNCTION HAS A NONPOSITIVE +FUNCTION VALUE AND NONNEGATIVE DERIVATIVE, THEN THE INTERVAL OF +UNCERTAINTY IS CHOSEN SO THAT IT CONTAINS A MINIMIZER OF F(X+STP*S). + +THE ALGORITHM IS DESIGNED TO FIND A STEP WHICH SATISFIES THE SUFFICIENT +DECREASE CONDITION + + F(X+STP*S) .LE. F(X) + FTOL*STP*(GRADF(X)'S), + +AND THE CURVATURE CONDITION + + ABS(GRADF(X+STP*S)'S)) .LE. GTOL*ABS(GRADF(X)'S). + +IF FTOL IS LESS THAN GTOL AND IF, FOR EXAMPLE, THE FUNCTION IS BOUNDED +BELOW, THEN THERE IS ALWAYS A STEP WHICH SATISFIES BOTH CONDITIONS. +IF NO STEP CAN BE FOUND WHICH SATISFIES BOTH CONDITIONS, THEN THE +ALGORITHM USUALLY STOPS WHEN ROUNDING ERRORS PREVENT FURTHER PROGRESS. +IN THIS CASE STP ONLY SATISFIES THE SUFFICIENT DECREASE CONDITION. + + +:::::::::::::IMPORTANT NOTES::::::::::::: + +NOTE 1: + +This routine guarantees that it will stop at the last point where function +value was calculated. It won't make several additional function evaluations +after finding good point. So if you store function evaluations requested by +this routine, you can be sure that last one is the point where we've stopped. + +NOTE 2: + +when 0initial_point - after rounding to machine precision + +NOTE 4: + +when non-descent direction is specified, algorithm stops with MCINFO=0, +Stp=0 and initial point at X[]. +::::::::::::::::::::::::::::::::::::::::: + + +PARAMETERS DESCRIPRION + +STAGE IS ZERO ON FIRST CALL, ZERO ON FINAL EXIT + +N IS A POSITIVE INTEGER INPUT VARIABLE SET TO THE NUMBER OF VARIABLES. + +X IS AN ARRAY OF LENGTH N. ON INPUT IT MUST CONTAIN THE BASE POINT FOR +THE LINE SEARCH. ON OUTPUT IT CONTAINS X+STP*S. + +F IS A VARIABLE. ON INPUT IT MUST CONTAIN THE VALUE OF F AT X. ON OUTPUT +IT CONTAINS THE VALUE OF F AT X + STP*S. + +G IS AN ARRAY OF LENGTH N. ON INPUT IT MUST CONTAIN THE GRADIENT OF F AT X. +ON OUTPUT IT CONTAINS THE GRADIENT OF F AT X + STP*S. + +S IS AN INPUT ARRAY OF LENGTH N WHICH SPECIFIES THE SEARCH DIRECTION. + +STP IS A NONNEGATIVE VARIABLE. ON INPUT STP CONTAINS AN INITIAL ESTIMATE +OF A SATISFACTORY STEP. ON OUTPUT STP CONTAINS THE FINAL ESTIMATE. + +FTOL AND GTOL ARE NONNEGATIVE INPUT VARIABLES. 
TERMINATION OCCURS WHEN THE +SUFFICIENT DECREASE CONDITION AND THE DIRECTIONAL DERIVATIVE CONDITION ARE +SATISFIED. + +XTOL IS A NONNEGATIVE INPUT VARIABLE. TERMINATION OCCURS WHEN THE RELATIVE +WIDTH OF THE INTERVAL OF UNCERTAINTY IS AT MOST XTOL. + +STPMIN AND STPMAX ARE NONNEGATIVE INPUT VARIABLES WHICH SPECIFY LOWER AND +UPPER BOUNDS FOR THE STEP. + +MAXFEV IS A POSITIVE INTEGER INPUT VARIABLE. TERMINATION OCCURS WHEN THE +NUMBER OF CALLS TO FCN IS AT LEAST MAXFEV BY THE END OF AN ITERATION. + +INFO IS AN INTEGER OUTPUT VARIABLE SET AS FOLLOWS: + INFO = 0 IMPROPER INPUT PARAMETERS. + + INFO = 1 THE SUFFICIENT DECREASE CONDITION AND THE + DIRECTIONAL DERIVATIVE CONDITION HOLD. + + INFO = 2 RELATIVE WIDTH OF THE INTERVAL OF UNCERTAINTY + IS AT MOST XTOL. + + INFO = 3 NUMBER OF CALLS TO FCN HAS REACHED MAXFEV. + + INFO = 4 THE STEP IS AT THE LOWER BOUND STPMIN. + + INFO = 5 THE STEP IS AT THE UPPER BOUND STPMAX. + + INFO = 6 ROUNDING ERRORS PREVENT FURTHER PROGRESS. + THERE MAY NOT BE A STEP WHICH SATISFIES THE + SUFFICIENT DECREASE AND CURVATURE CONDITIONS. + TOLERANCES MAY BE TOO SMALL. + +NFEV IS AN INTEGER OUTPUT VARIABLE SET TO THE NUMBER OF CALLS TO FCN. + +WA IS A WORK ARRAY OF LENGTH N. + +ARGONNE NATIONAL LABORATORY. MINPACK PROJECT. JUNE 1983 +JORGE J. MORE', DAVID J. THUENTE +*************************************************************************/ +void mcsrch(ae_int_t n, + /* Real */ ae_vector* x, + double* f, + /* Real */ ae_vector* g, + /* Real */ ae_vector* s, + double* stp, + double stpmax, + double gtol, + ae_int_t* info, + ae_int_t* nfev, + /* Real */ ae_vector* wa, + linminstate* state, + ae_int_t* stage, + ae_state *_state) +{ + ae_int_t i; + double v; + double p5; + double p66; + double zero; + + + + /* + * init + */ + p5 = 0.5; + p66 = 0.66; + state->xtrapf = 4.0; + zero = (double)(0); + if( ae_fp_eq(stpmax,(double)(0)) ) + { + stpmax = linmin_defstpmax; + } + if( ae_fp_less(*stp,linmin_stpmin) ) + { + *stp = linmin_stpmin; + } + if( ae_fp_greater(*stp,stpmax) ) + { + *stp = stpmax; + } + + /* + * Main cycle + */ + for(;;) + { + if( *stage==0 ) + { + + /* + * NEXT + */ + *stage = 2; + continue; + } + if( *stage==2 ) + { + state->infoc = 1; + *info = 0; + + /* + * CHECK THE INPUT PARAMETERS FOR ERRORS. + */ + if( ae_fp_less(stpmax,linmin_stpmin)&&ae_fp_greater(stpmax,(double)(0)) ) + { + *info = 5; + *stp = stpmax; + *stage = 0; + return; + } + if( ((((((n<=0||ae_fp_less_eq(*stp,(double)(0)))||ae_fp_less(linmin_ftol,(double)(0)))||ae_fp_less(gtol,zero))||ae_fp_less(linmin_xtol,zero))||ae_fp_less(linmin_stpmin,zero))||ae_fp_less(stpmax,linmin_stpmin))||linmin_maxfev<=0 ) + { + *stage = 0; + return; + } + + /* + * COMPUTE THE INITIAL GRADIENT IN THE SEARCH DIRECTION + * AND CHECK THAT S IS A DESCENT DIRECTION. + */ + v = ae_v_dotproduct(&g->ptr.p_double[0], 1, &s->ptr.p_double[0], 1, ae_v_len(0,n-1)); + state->dginit = v; + if( ae_fp_greater_eq(state->dginit,(double)(0)) ) + { + *stage = 0; + *stp = (double)(0); + return; + } + + /* + * INITIALIZE LOCAL VARIABLES. + */ + state->brackt = ae_false; + state->stage1 = ae_true; + *nfev = 0; + state->finit = *f; + state->dgtest = linmin_ftol*state->dginit; + state->width = stpmax-linmin_stpmin; + state->width1 = state->width/p5; + ae_v_move(&wa->ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); + + /* + * THE VARIABLES STX, FX, DGX CONTAIN THE VALUES OF THE STEP, + * FUNCTION, AND DIRECTIONAL DERIVATIVE AT THE BEST STEP. 
+ * THE VARIABLES STY, FY, DGY CONTAIN THE VALUE OF THE STEP, + * FUNCTION, AND DERIVATIVE AT THE OTHER ENDPOINT OF + * THE INTERVAL OF UNCERTAINTY. + * THE VARIABLES STP, F, DG CONTAIN THE VALUES OF THE STEP, + * FUNCTION, AND DERIVATIVE AT THE CURRENT STEP. + */ + state->stx = (double)(0); + state->fx = state->finit; + state->dgx = state->dginit; + state->sty = (double)(0); + state->fy = state->finit; + state->dgy = state->dginit; + + /* + * NEXT + */ + *stage = 3; + continue; + } + if( *stage==3 ) + { + + /* + * START OF ITERATION. + * + * SET THE MINIMUM AND MAXIMUM STEPS TO CORRESPOND + * TO THE PRESENT INTERVAL OF UNCERTAINTY. + */ + if( state->brackt ) + { + if( ae_fp_less(state->stx,state->sty) ) + { + state->stmin = state->stx; + state->stmax = state->sty; + } + else + { + state->stmin = state->sty; + state->stmax = state->stx; + } + } + else + { + state->stmin = state->stx; + state->stmax = *stp+state->xtrapf*(*stp-state->stx); + } + + /* + * FORCE THE STEP TO BE WITHIN THE BOUNDS STPMAX AND STPMIN. + */ + if( ae_fp_greater(*stp,stpmax) ) + { + *stp = stpmax; + } + if( ae_fp_less(*stp,linmin_stpmin) ) + { + *stp = linmin_stpmin; + } + + /* + * IF AN UNUSUAL TERMINATION IS TO OCCUR THEN LET + * STP BE THE LOWEST POINT OBTAINED SO FAR. + */ + if( (((state->brackt&&(ae_fp_less_eq(*stp,state->stmin)||ae_fp_greater_eq(*stp,state->stmax)))||*nfev>=linmin_maxfev-1)||state->infoc==0)||(state->brackt&&ae_fp_less_eq(state->stmax-state->stmin,linmin_xtol*state->stmax)) ) + { + *stp = state->stx; + } + + /* + * EVALUATE THE FUNCTION AND GRADIENT AT STP + * AND COMPUTE THE DIRECTIONAL DERIVATIVE. + */ + ae_v_move(&x->ptr.p_double[0], 1, &wa->ptr.p_double[0], 1, ae_v_len(0,n-1)); + ae_v_addd(&x->ptr.p_double[0], 1, &s->ptr.p_double[0], 1, ae_v_len(0,n-1), *stp); + + /* + * NEXT + */ + *stage = 4; + return; + } + if( *stage==4 ) + { + *info = 0; + *nfev = *nfev+1; + v = ae_v_dotproduct(&g->ptr.p_double[0], 1, &s->ptr.p_double[0], 1, ae_v_len(0,n-1)); + state->dg = v; + state->ftest1 = state->finit+*stp*state->dgtest; + + /* + * TEST FOR CONVERGENCE. + */ + if( (state->brackt&&(ae_fp_less_eq(*stp,state->stmin)||ae_fp_greater_eq(*stp,state->stmax)))||state->infoc==0 ) + { + *info = 6; + } + if( ((ae_fp_eq(*stp,stpmax)&&ae_fp_less(*f,state->finit))&&ae_fp_less_eq(*f,state->ftest1))&&ae_fp_less_eq(state->dg,state->dgtest) ) + { + *info = 5; + } + if( ae_fp_eq(*stp,linmin_stpmin)&&((ae_fp_greater_eq(*f,state->finit)||ae_fp_greater(*f,state->ftest1))||ae_fp_greater_eq(state->dg,state->dgtest)) ) + { + *info = 4; + } + if( *nfev>=linmin_maxfev ) + { + *info = 3; + } + if( state->brackt&&ae_fp_less_eq(state->stmax-state->stmin,linmin_xtol*state->stmax) ) + { + *info = 2; + } + if( (ae_fp_less(*f,state->finit)&&ae_fp_less_eq(*f,state->ftest1))&&ae_fp_less_eq(ae_fabs(state->dg, _state),-gtol*state->dginit) ) + { + *info = 1; + } + + /* + * CHECK FOR TERMINATION. + */ + if( *info!=0 ) + { + + /* + * Check guarantees provided by the function for INFO=1 or INFO=5 + */ + if( *info==1||*info==5 ) + { + v = 0.0; + for(i=0; i<=n-1; i++) + { + v = v+(wa->ptr.p_double[i]-x->ptr.p_double[i])*(wa->ptr.p_double[i]-x->ptr.p_double[i]); + } + if( ae_fp_greater_eq(*f,state->finit)||ae_fp_eq(v,0.0) ) + { + *info = 6; + } + } + *stage = 0; + return; + } + + /* + * IN THE FIRST STAGE WE SEEK A STEP FOR WHICH THE MODIFIED + * FUNCTION HAS A NONPOSITIVE VALUE AND NONNEGATIVE DERIVATIVE. 
+ */ + if( (state->stage1&&ae_fp_less_eq(*f,state->ftest1))&&ae_fp_greater_eq(state->dg,ae_minreal(linmin_ftol, gtol, _state)*state->dginit) ) + { + state->stage1 = ae_false; + } + + /* + * A MODIFIED FUNCTION IS USED TO PREDICT THE STEP ONLY IF + * WE HAVE NOT OBTAINED A STEP FOR WHICH THE MODIFIED + * FUNCTION HAS A NONPOSITIVE FUNCTION VALUE AND NONNEGATIVE + * DERIVATIVE, AND IF A LOWER FUNCTION VALUE HAS BEEN + * OBTAINED BUT THE DECREASE IS NOT SUFFICIENT. + */ + if( (state->stage1&&ae_fp_less_eq(*f,state->fx))&&ae_fp_greater(*f,state->ftest1) ) + { + + /* + * DEFINE THE MODIFIED FUNCTION AND DERIVATIVE VALUES. + */ + state->fm = *f-*stp*state->dgtest; + state->fxm = state->fx-state->stx*state->dgtest; + state->fym = state->fy-state->sty*state->dgtest; + state->dgm = state->dg-state->dgtest; + state->dgxm = state->dgx-state->dgtest; + state->dgym = state->dgy-state->dgtest; + + /* + * CALL CSTEP TO UPDATE THE INTERVAL OF UNCERTAINTY + * AND TO COMPUTE THE NEW STEP. + */ + linmin_mcstep(&state->stx, &state->fxm, &state->dgxm, &state->sty, &state->fym, &state->dgym, stp, state->fm, state->dgm, &state->brackt, state->stmin, state->stmax, &state->infoc, _state); + + /* + * RESET THE FUNCTION AND GRADIENT VALUES FOR F. + */ + state->fx = state->fxm+state->stx*state->dgtest; + state->fy = state->fym+state->sty*state->dgtest; + state->dgx = state->dgxm+state->dgtest; + state->dgy = state->dgym+state->dgtest; + } + else + { + + /* + * CALL MCSTEP TO UPDATE THE INTERVAL OF UNCERTAINTY + * AND TO COMPUTE THE NEW STEP. + */ + linmin_mcstep(&state->stx, &state->fx, &state->dgx, &state->sty, &state->fy, &state->dgy, stp, *f, state->dg, &state->brackt, state->stmin, state->stmax, &state->infoc, _state); + } + + /* + * FORCE A SUFFICIENT DECREASE IN THE SIZE OF THE + * INTERVAL OF UNCERTAINTY. + */ + if( state->brackt ) + { + if( ae_fp_greater_eq(ae_fabs(state->sty-state->stx, _state),p66*state->width1) ) + { + *stp = state->stx+p5*(state->sty-state->stx); + } + state->width1 = state->width; + state->width = ae_fabs(state->sty-state->stx, _state); + } + + /* + * NEXT. + */ + *stage = 3; + continue; + } + } +} + + +/************************************************************************* +These functions perform Armijo line search using at most FMAX function +evaluations. It doesn't enforce some kind of " sufficient decrease" +criterion - it just tries different Armijo steps and returns optimum found +so far. 
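Stripped of the reverse-communication plumbing, the scan is an expand-then-shrink loop on the step length: multiply the step by a fixed factor while the objective keeps decreasing (clamping to STPMAX when a bound is given), and if even the first longer step does not help, divide by the same factor instead, stopping after at most FMAX evaluations and keeping the best point seen. The direct-call sketch below assumes a caller-supplied objective f(stp) = F(X + stp*S); armijo_scan, factor and ctx are illustrative names, not part of the library, whose own driver is the rcomm-based armijoiteration code that follows.

    /* Illustrative sketch, not the library code: scan Armijo-style steps
     * around stp0 and return the best step found within fmax evaluations.
     * 'factor' plays the role of the internal step-scaling constant. */
    static double armijo_scan(double (*f)(double stp, void *ctx), void *ctx,
                              double stp0, double stpmax, int fmax,
                              double factor, double *fbest)
    {
        double stp  = stp0;
        double fcur = f(stp, ctx);           /* value at the initial step    */
        int    nfev = 1;

        /* expand: keep lengthening the step while F improves */
        while( nfev<fmax )
        {
            double v = stp*factor;
            double fv;
            if( stpmax>0.0 && v>stpmax )
                v = stpmax;                  /* respect the upper bound      */
            fv = f(v, ctx);
            nfev++;
            if( fv>=fcur )
                break;                       /* longer step stopped helping  */
            stp  = v;
            fcur = fv;
            if( stpmax>0.0 && stp>=stpmax )
                break;                       /* hit the bound                */
        }

        /* shrink: if lengthening never helped, try shorter steps instead */
        if( stp==stp0 )
        {
            while( nfev<fmax )
            {
                double v  = stp/factor;
                double fv = f(v, ctx);
                nfev++;
                if( fv>=fcur )
                    break;                   /* shorter step stopped helping */
                stp  = v;
                fcur = fv;
            }
        }
        *fbest = fcur;
        return stp;                          /* best step found so far       */
    }

The library realizes the same control flow through the State structure so that the objective can be evaluated by the caller between armijoiteration calls, which is why the code below is organized around rstate.stage labels rather than a plain loop.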
+
+Optimization is done using F-rcomm interface:
+* ArmijoCreate initializes State structure
+  (reusing previously allocated buffers)
+* ArmijoIteration is subsequently called
+* ArmijoResults returns results
+
+INPUT PARAMETERS:
+    N       -   problem size
+    X       -   array[N], starting point
+    F       -   F(X+S*STP)
+    S       -   step direction, S>0
+    STP     -   step length
+    STPMAX  -   maximum value for STP or zero (if no limit is imposed)
+    FMAX    -   maximum number of function evaluations
+    State   -   optimization state
+
+  -- ALGLIB --
+     Copyright 05.10.2010 by Bochkanov Sergey
+*************************************************************************/
+void armijocreate(ae_int_t n,
+     /* Real    */ ae_vector* x,
+     double f,
+     /* Real    */ ae_vector* s,
+     double stp,
+     double stpmax,
+     ae_int_t fmax,
+     armijostate* state,
+     ae_state *_state)
+{
+
+
+    if( state->x.cnt<n )
+    {
+        ae_vector_set_length(&state->x, n, _state);
+    }
+    if( state->xbase.cnt<n )
+    {
+        ae_vector_set_length(&state->xbase, n, _state);
+    }
+    if( state->s.cnt<n )
+    {
+        ae_vector_set_length(&state->s, n, _state);
+    }
+    state->stpmax = stpmax;
+    state->fmax = fmax;
+    state->stplen = stp;
+    state->fcur = f;
+    state->n = n;
+    ae_v_move(&state->xbase.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1));
+    ae_v_move(&state->s.ptr.p_double[0], 1, &s->ptr.p_double[0], 1, ae_v_len(0,n-1));
+    ae_vector_set_length(&state->rstate.ia, 0+1, _state);
+    ae_vector_set_length(&state->rstate.ra, 0+1, _state);
+    state->rstate.stage = -1;
+}
+
+
+/*************************************************************************
+This is rcomm-based search function
+
+  -- ALGLIB --
+     Copyright 05.10.2010 by Bochkanov Sergey
+*************************************************************************/
+ae_bool armijoiteration(armijostate* state, ae_state *_state)
+{
+    double v;
+    ae_int_t n;
+    ae_bool result;
+
+
+    /*
+     * Reverse communication preparations
+     * I know it looks ugly, but it works the same way
+     * anywhere from C++ to Python.
+ * + * This code initializes locals by: + * * random values determined during code + * generation - on first subroutine call + * * values from previous call - on subsequent calls + */ + if( state->rstate.stage>=0 ) + { + n = state->rstate.ia.ptr.p_int[0]; + v = state->rstate.ra.ptr.p_double[0]; + } + else + { + n = 359; + v = -58; + } + if( state->rstate.stage==0 ) + { + goto lbl_0; + } + if( state->rstate.stage==1 ) + { + goto lbl_1; + } + if( state->rstate.stage==2 ) + { + goto lbl_2; + } + if( state->rstate.stage==3 ) + { + goto lbl_3; + } + + /* + * Routine body + */ + if( (ae_fp_less_eq(state->stplen,(double)(0))||ae_fp_less(state->stpmax,(double)(0)))||state->fmax<2 ) + { + state->info = 0; + result = ae_false; + return result; + } + if( ae_fp_less_eq(state->stplen,linmin_stpmin) ) + { + state->info = 4; + result = ae_false; + return result; + } + n = state->n; + state->nfev = 0; + + /* + * We always need F + */ + state->needf = ae_true; + + /* + * Bound StpLen + */ + if( ae_fp_greater(state->stplen,state->stpmax)&&ae_fp_neq(state->stpmax,(double)(0)) ) + { + state->stplen = state->stpmax; + } + + /* + * Increase length + */ + v = state->stplen*linmin_armijofactor; + if( ae_fp_greater(v,state->stpmax)&&ae_fp_neq(state->stpmax,(double)(0)) ) + { + v = state->stpmax; + } + ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); + ae_v_addd(&state->x.ptr.p_double[0], 1, &state->s.ptr.p_double[0], 1, ae_v_len(0,n-1), v); + state->rstate.stage = 0; + goto lbl_rcomm; +lbl_0: + state->nfev = state->nfev+1; + if( ae_fp_greater_eq(state->f,state->fcur) ) + { + goto lbl_4; + } + state->stplen = v; + state->fcur = state->f; +lbl_6: + if( ae_false ) + { + goto lbl_7; + } + + /* + * test stopping conditions + */ + if( state->nfev>=state->fmax ) + { + state->info = 3; + result = ae_false; + return result; + } + if( ae_fp_greater_eq(state->stplen,state->stpmax) ) + { + state->info = 5; + result = ae_false; + return result; + } + + /* + * evaluate F + */ + v = state->stplen*linmin_armijofactor; + if( ae_fp_greater(v,state->stpmax)&&ae_fp_neq(state->stpmax,(double)(0)) ) + { + v = state->stpmax; + } + ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); + ae_v_addd(&state->x.ptr.p_double[0], 1, &state->s.ptr.p_double[0], 1, ae_v_len(0,n-1), v); + state->rstate.stage = 1; + goto lbl_rcomm; +lbl_1: + state->nfev = state->nfev+1; + + /* + * make decision + */ + if( ae_fp_less(state->f,state->fcur) ) + { + state->stplen = v; + state->fcur = state->f; + } + else + { + state->info = 1; + result = ae_false; + return result; + } + goto lbl_6; +lbl_7: +lbl_4: + + /* + * Decrease length + */ + v = state->stplen/linmin_armijofactor; + ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); + ae_v_addd(&state->x.ptr.p_double[0], 1, &state->s.ptr.p_double[0], 1, ae_v_len(0,n-1), v); + state->rstate.stage = 2; + goto lbl_rcomm; +lbl_2: + state->nfev = state->nfev+1; + if( ae_fp_greater_eq(state->f,state->fcur) ) + { + goto lbl_8; + } + state->stplen = state->stplen/linmin_armijofactor; + state->fcur = state->f; +lbl_10: + if( ae_false ) + { + goto lbl_11; + } + + /* + * test stopping conditions + */ + if( state->nfev>=state->fmax ) + { + state->info = 3; + result = ae_false; + return result; + } + if( ae_fp_less_eq(state->stplen,linmin_stpmin) ) + { + state->info = 4; + result = ae_false; + return result; + } + + /* + * evaluate F + */ + v = state->stplen/linmin_armijofactor; + 
ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); + ae_v_addd(&state->x.ptr.p_double[0], 1, &state->s.ptr.p_double[0], 1, ae_v_len(0,n-1), v); + state->rstate.stage = 3; + goto lbl_rcomm; +lbl_3: + state->nfev = state->nfev+1; + + /* + * make decision + */ + if( ae_fp_less(state->f,state->fcur) ) + { + state->stplen = state->stplen/linmin_armijofactor; + state->fcur = state->f; + } + else + { + state->info = 1; + result = ae_false; + return result; + } + goto lbl_10; +lbl_11: +lbl_8: + + /* + * Nothing to be done + */ + state->info = 1; + result = ae_false; + return result; + + /* + * Saving state + */ +lbl_rcomm: + result = ae_true; + state->rstate.ia.ptr.p_int[0] = n; + state->rstate.ra.ptr.p_double[0] = v; + return result; +} + + +/************************************************************************* +Results of Armijo search + +OUTPUT PARAMETERS: + INFO - on output it is set to one of the return codes: + * 0 improper input params + * 1 optimum step is found with at most FMAX evaluations + * 3 FMAX evaluations were used, + X contains optimum found so far + * 4 step is at lower bound STPMIN + * 5 step is at upper bound + STP - step length (in case of failure it is still returned) + F - function value (in case of failure it is still returned) + + -- ALGLIB -- + Copyright 05.10.2010 by Bochkanov Sergey +*************************************************************************/ +void armijoresults(armijostate* state, + ae_int_t* info, + double* stp, + double* f, + ae_state *_state) +{ + + + *info = state->info; + *stp = state->stplen; + *f = state->fcur; +} + + +static void linmin_mcstep(double* stx, + double* fx, + double* dx, + double* sty, + double* fy, + double* dy, + double* stp, + double fp, + double dp, + ae_bool* brackt, + double stmin, + double stmax, + ae_int_t* info, + ae_state *_state) +{ + ae_bool bound; + double gamma; + double p; + double q; + double r; + double s; + double sgnd; + double stpc; + double stpf; + double stpq; + double theta; + + + *info = 0; + + /* + * CHECK THE INPUT PARAMETERS FOR ERRORS. + */ + if( ((*brackt&&(ae_fp_less_eq(*stp,ae_minreal(*stx, *sty, _state))||ae_fp_greater_eq(*stp,ae_maxreal(*stx, *sty, _state))))||ae_fp_greater_eq(*dx*(*stp-(*stx)),(double)(0)))||ae_fp_less(stmax,stmin) ) + { + return; + } + + /* + * DETERMINE IF THE DERIVATIVES HAVE OPPOSITE SIGN. + */ + sgnd = dp*(*dx/ae_fabs(*dx, _state)); + + /* + * FIRST CASE. A HIGHER FUNCTION VALUE. + * THE MINIMUM IS BRACKETED. IF THE CUBIC STEP IS CLOSER + * TO STX THAN THE QUADRATIC STEP, THE CUBIC STEP IS TAKEN, + * ELSE THE AVERAGE OF THE CUBIC AND QUADRATIC STEPS IS TAKEN. + */ + if( ae_fp_greater(fp,*fx) ) + { + *info = 1; + bound = ae_true; + theta = 3*(*fx-fp)/(*stp-(*stx))+(*dx)+dp; + s = ae_maxreal(ae_fabs(theta, _state), ae_maxreal(ae_fabs(*dx, _state), ae_fabs(dp, _state), _state), _state); + gamma = s*ae_sqrt(ae_sqr(theta/s, _state)-*dx/s*(dp/s), _state); + if( ae_fp_less(*stp,*stx) ) + { + gamma = -gamma; + } + p = gamma-(*dx)+theta; + q = gamma-(*dx)+gamma+dp; + r = p/q; + stpc = *stx+r*(*stp-(*stx)); + stpq = *stx+*dx/((*fx-fp)/(*stp-(*stx))+(*dx))/2*(*stp-(*stx)); + if( ae_fp_less(ae_fabs(stpc-(*stx), _state),ae_fabs(stpq-(*stx), _state)) ) + { + stpf = stpc; + } + else + { + stpf = stpc+(stpq-stpc)/2; + } + *brackt = ae_true; + } + else + { + if( ae_fp_less(sgnd,(double)(0)) ) + { + + /* + * SECOND CASE. A LOWER FUNCTION VALUE AND DERIVATIVES OF + * OPPOSITE SIGN. THE MINIMUM IS BRACKETED. 
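The ArmijoCreate / ArmijoIteration / ArmijoResults triple above is a reverse-communication API: each time ArmijoIteration returns true it has filled State.X and expects the caller to store F(State.X) into State.F before calling it again. A short driver sketch (illustration only, not part of the upstream sources; demo_objective and demo_armijo_search are hypothetical names), using the init/create/results signatures from this patch:

#include <string.h>
#include "alglibinternal.h"

/* toy objective, used only for illustration */
static double demo_objective(const double* x, ae_int_t n)
{
    double s;
    ae_int_t i;
    s = 0.0;
    for(i=0; i<=n-1; i++)
    {
        s = s+x[i]*x[i];
    }
    return s;
}

/* x - current point, s - search direction, both of length n */
static void demo_armijo_search(/* Real    */ ae_vector* x,
     /* Real    */ ae_vector* s,
     ae_int_t n,
     ae_state *_state)
{
    ae_frame _frame_block;
    armijostate state;
    ae_int_t info;
    double stp;
    double f;

    ae_frame_make(_state, &_frame_block);
    memset(&state, 0, sizeof(state));
    _armijostate_init(&state, _state, ae_true);
    f = demo_objective(x->ptr.p_double, n);
    armijocreate(n, x, f, s, 1.0, 0.0, 20, &state, _state);
    while( armijoiteration(&state, _state) )
    {
        /* reverse communication request: NeedF is set, evaluate F at state.x */
        state.f = demo_objective(state.x.ptr.p_double, n);
    }
    armijoresults(&state, &info, &stp, &f, _state);
    /* info, stp and f now hold the return code, best step and best value */
    ae_frame_leave(_state);
}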
IF THE CUBIC + * STEP IS CLOSER TO STX THAN THE QUADRATIC (SECANT) STEP, + * THE CUBIC STEP IS TAKEN, ELSE THE QUADRATIC STEP IS TAKEN. + */ + *info = 2; + bound = ae_false; + theta = 3*(*fx-fp)/(*stp-(*stx))+(*dx)+dp; + s = ae_maxreal(ae_fabs(theta, _state), ae_maxreal(ae_fabs(*dx, _state), ae_fabs(dp, _state), _state), _state); + gamma = s*ae_sqrt(ae_sqr(theta/s, _state)-*dx/s*(dp/s), _state); + if( ae_fp_greater(*stp,*stx) ) + { + gamma = -gamma; + } + p = gamma-dp+theta; + q = gamma-dp+gamma+(*dx); + r = p/q; + stpc = *stp+r*(*stx-(*stp)); + stpq = *stp+dp/(dp-(*dx))*(*stx-(*stp)); + if( ae_fp_greater(ae_fabs(stpc-(*stp), _state),ae_fabs(stpq-(*stp), _state)) ) + { + stpf = stpc; + } + else + { + stpf = stpq; + } + *brackt = ae_true; + } + else + { + if( ae_fp_less(ae_fabs(dp, _state),ae_fabs(*dx, _state)) ) + { + + /* + * THIRD CASE. A LOWER FUNCTION VALUE, DERIVATIVES OF THE + * SAME SIGN, AND THE MAGNITUDE OF THE DERIVATIVE DECREASES. + * THE CUBIC STEP IS ONLY USED IF THE CUBIC TENDS TO INFINITY + * IN THE DIRECTION OF THE STEP OR IF THE MINIMUM OF THE CUBIC + * IS BEYOND STP. OTHERWISE THE CUBIC STEP IS DEFINED TO BE + * EITHER STPMIN OR STPMAX. THE QUADRATIC (SECANT) STEP IS ALSO + * COMPUTED AND IF THE MINIMUM IS BRACKETED THEN THE THE STEP + * CLOSEST TO STX IS TAKEN, ELSE THE STEP FARTHEST AWAY IS TAKEN. + */ + *info = 3; + bound = ae_true; + theta = 3*(*fx-fp)/(*stp-(*stx))+(*dx)+dp; + s = ae_maxreal(ae_fabs(theta, _state), ae_maxreal(ae_fabs(*dx, _state), ae_fabs(dp, _state), _state), _state); + + /* + * THE CASE GAMMA = 0 ONLY ARISES IF THE CUBIC DOES NOT TEND + * TO INFINITY IN THE DIRECTION OF THE STEP. + */ + gamma = s*ae_sqrt(ae_maxreal((double)(0), ae_sqr(theta/s, _state)-*dx/s*(dp/s), _state), _state); + if( ae_fp_greater(*stp,*stx) ) + { + gamma = -gamma; + } + p = gamma-dp+theta; + q = gamma+(*dx-dp)+gamma; + r = p/q; + if( ae_fp_less(r,(double)(0))&&ae_fp_neq(gamma,(double)(0)) ) + { + stpc = *stp+r*(*stx-(*stp)); + } + else + { + if( ae_fp_greater(*stp,*stx) ) + { + stpc = stmax; + } + else + { + stpc = stmin; + } + } + stpq = *stp+dp/(dp-(*dx))*(*stx-(*stp)); + if( *brackt ) + { + if( ae_fp_less(ae_fabs(*stp-stpc, _state),ae_fabs(*stp-stpq, _state)) ) + { + stpf = stpc; + } + else + { + stpf = stpq; + } + } + else + { + if( ae_fp_greater(ae_fabs(*stp-stpc, _state),ae_fabs(*stp-stpq, _state)) ) + { + stpf = stpc; + } + else + { + stpf = stpq; + } + } + } + else + { + + /* + * FOURTH CASE. A LOWER FUNCTION VALUE, DERIVATIVES OF THE + * SAME SIGN, AND THE MAGNITUDE OF THE DERIVATIVE DOES + * NOT DECREASE. IF THE MINIMUM IS NOT BRACKETED, THE STEP + * IS EITHER STPMIN OR STPMAX, ELSE THE CUBIC STEP IS TAKEN. + */ + *info = 4; + bound = ae_false; + if( *brackt ) + { + theta = 3*(fp-(*fy))/(*sty-(*stp))+(*dy)+dp; + s = ae_maxreal(ae_fabs(theta, _state), ae_maxreal(ae_fabs(*dy, _state), ae_fabs(dp, _state), _state), _state); + gamma = s*ae_sqrt(ae_sqr(theta/s, _state)-*dy/s*(dp/s), _state); + if( ae_fp_greater(*stp,*sty) ) + { + gamma = -gamma; + } + p = gamma-dp+theta; + q = gamma-dp+gamma+(*dy); + r = p/q; + stpc = *stp+r*(*sty-(*stp)); + stpf = stpc; + } + else + { + if( ae_fp_greater(*stp,*stx) ) + { + stpf = stmax; + } + else + { + stpf = stmin; + } + } + } + } + } + + /* + * UPDATE THE INTERVAL OF UNCERTAINTY. THIS UPDATE DOES NOT + * DEPEND ON THE NEW STEP OR THE CASE ANALYSIS ABOVE. 
+ */ + if( ae_fp_greater(fp,*fx) ) + { + *sty = *stp; + *fy = fp; + *dy = dp; + } + else + { + if( ae_fp_less(sgnd,0.0) ) + { + *sty = *stx; + *fy = *fx; + *dy = *dx; + } + *stx = *stp; + *fx = fp; + *dx = dp; + } + + /* + * COMPUTE THE NEW STEP AND SAFEGUARD IT. + */ + stpf = ae_minreal(stmax, stpf, _state); + stpf = ae_maxreal(stmin, stpf, _state); + *stp = stpf; + if( *brackt&&bound ) + { + if( ae_fp_greater(*sty,*stx) ) + { + *stp = ae_minreal(*stx+0.66*(*sty-(*stx)), *stp, _state); + } + else + { + *stp = ae_maxreal(*stx+0.66*(*sty-(*stx)), *stp, _state); + } + } +} + + +void _linminstate_init(void* _p, ae_state *_state, ae_bool make_automatic) +{ + linminstate *p = (linminstate*)_p; + ae_touch_ptr((void*)p); +} + + +void _linminstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + linminstate *dst = (linminstate*)_dst; + linminstate *src = (linminstate*)_src; + dst->brackt = src->brackt; + dst->stage1 = src->stage1; + dst->infoc = src->infoc; + dst->dg = src->dg; + dst->dgm = src->dgm; + dst->dginit = src->dginit; + dst->dgtest = src->dgtest; + dst->dgx = src->dgx; + dst->dgxm = src->dgxm; + dst->dgy = src->dgy; + dst->dgym = src->dgym; + dst->finit = src->finit; + dst->ftest1 = src->ftest1; + dst->fm = src->fm; + dst->fx = src->fx; + dst->fxm = src->fxm; + dst->fy = src->fy; + dst->fym = src->fym; + dst->stx = src->stx; + dst->sty = src->sty; + dst->stmin = src->stmin; + dst->stmax = src->stmax; + dst->width = src->width; + dst->width1 = src->width1; + dst->xtrapf = src->xtrapf; +} + + +void _linminstate_clear(void* _p) +{ + linminstate *p = (linminstate*)_p; + ae_touch_ptr((void*)p); +} + + +void _linminstate_destroy(void* _p) +{ + linminstate *p = (linminstate*)_p; + ae_touch_ptr((void*)p); +} + + +void _armijostate_init(void* _p, ae_state *_state, ae_bool make_automatic) +{ + armijostate *p = (armijostate*)_p; + ae_touch_ptr((void*)p); + ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->xbase, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic); + _rcommstate_init(&p->rstate, _state, make_automatic); +} + + +void _armijostate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + armijostate *dst = (armijostate*)_dst; + armijostate *src = (armijostate*)_src; + dst->needf = src->needf; + ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); + dst->f = src->f; + dst->n = src->n; + ae_vector_init_copy(&dst->xbase, &src->xbase, _state, make_automatic); + ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic); + dst->stplen = src->stplen; + dst->fcur = src->fcur; + dst->stpmax = src->stpmax; + dst->fmax = src->fmax; + dst->nfev = src->nfev; + dst->info = src->info; + _rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic); +} + + +void _armijostate_clear(void* _p) +{ + armijostate *p = (armijostate*)_p; + ae_touch_ptr((void*)p); + ae_vector_clear(&p->x); + ae_vector_clear(&p->xbase); + ae_vector_clear(&p->s); + _rcommstate_clear(&p->rstate); +} + + +void _armijostate_destroy(void* _p) +{ + armijostate *p = (armijostate*)_p; + ae_touch_ptr((void*)p); + ae_vector_destroy(&p->x); + ae_vector_destroy(&p->xbase); + ae_vector_destroy(&p->s); + _rcommstate_destroy(&p->rstate); +} + + +#endif +#if defined(AE_COMPILE_XBLAS) || !defined(AE_PARTIAL_BUILD) + + +/************************************************************************* +More precise dot-product. 
Absolute error of subroutine result is about +1 ulp of max(MX,V), where: + MX = max( |a[i]*b[i]| ) + V = |(a,b)| + +INPUT PARAMETERS + A - array[0..N-1], vector 1 + B - array[0..N-1], vector 2 + N - vectors length, N<2^29. + Temp - array[0..N-1], pre-allocated temporary storage + +OUTPUT PARAMETERS + R - (A,B) + RErr - estimate of error. This estimate accounts for both errors + during calculation of (A,B) and errors introduced by + rounding of A and B to fit in double (about 1 ulp). + + -- ALGLIB -- + Copyright 24.08.2009 by Bochkanov Sergey +*************************************************************************/ +void xdot(/* Real */ ae_vector* a, + /* Real */ ae_vector* b, + ae_int_t n, + /* Real */ ae_vector* temp, + double* r, + double* rerr, + ae_state *_state) +{ + ae_int_t i; + double mx; + double v; + + *r = 0; + *rerr = 0; + + + /* + * special cases: + * * N=0 + */ + if( n==0 ) + { + *r = (double)(0); + *rerr = (double)(0); + return; + } + mx = (double)(0); + for(i=0; i<=n-1; i++) + { + v = a->ptr.p_double[i]*b->ptr.p_double[i]; + temp->ptr.p_double[i] = v; + mx = ae_maxreal(mx, ae_fabs(v, _state), _state); + } + if( ae_fp_eq(mx,(double)(0)) ) + { + *r = (double)(0); + *rerr = (double)(0); + return; + } + xblas_xsum(temp, mx, n, r, rerr, _state); +} + + +/************************************************************************* +More precise complex dot-product. Absolute error of subroutine result is +about 1 ulp of max(MX,V), where: + MX = max( |a[i]*b[i]| ) + V = |(a,b)| + +INPUT PARAMETERS + A - array[0..N-1], vector 1 + B - array[0..N-1], vector 2 + N - vectors length, N<2^29. + Temp - array[0..2*N-1], pre-allocated temporary storage + +OUTPUT PARAMETERS + R - (A,B) + RErr - estimate of error. This estimate accounts for both errors + during calculation of (A,B) and errors introduced by + rounding of A and B to fit in double (about 1 ulp). 
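The xdot() routine above expects a caller-owned Temp buffer of length N and reports both the compensated dot product and an error bound; xcdot() below is used the same way but needs Temp of length 2*N. A short usage sketch (illustration only, not part of the upstream sources; demo_xdot is a hypothetical name):

#include <string.h>
#include "alglibinternal.h"

static void demo_xdot(ae_state *_state)
{
    ae_frame _frame_block;
    ae_vector a;
    ae_vector b;
    ae_vector temp;
    double r;
    double rerr;
    ae_int_t i;
    ae_int_t n;

    n = 4;
    ae_frame_make(_state, &_frame_block);
    memset(&a, 0, sizeof(a));
    memset(&b, 0, sizeof(b));
    memset(&temp, 0, sizeof(temp));
    ae_vector_init(&a, n, DT_REAL, _state, ae_true);
    ae_vector_init(&b, n, DT_REAL, _state, ae_true);
    ae_vector_init(&temp, n, DT_REAL, _state, ae_true);
    for(i=0; i<=n-1; i++)
    {
        a.ptr.p_double[i] = 1.0/(double)(i+1);
        b.ptr.p_double[i] = (double)(i+1);
    }
    xdot(&a, &b, n, &temp, &r, &rerr, _state);
    /* r is 4.0 up to rounding, rerr bounds the accumulated error */
    ae_frame_leave(_state);
}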
+ + -- ALGLIB -- + Copyright 27.01.2010 by Bochkanov Sergey +*************************************************************************/ +void xcdot(/* Complex */ ae_vector* a, + /* Complex */ ae_vector* b, + ae_int_t n, + /* Real */ ae_vector* temp, + ae_complex* r, + double* rerr, + ae_state *_state) +{ + ae_int_t i; + double mx; + double v; + double rerrx; + double rerry; + + r->x = 0; + r->y = 0; + *rerr = 0; + + + /* + * special cases: + * * N=0 + */ + if( n==0 ) + { + *r = ae_complex_from_i(0); + *rerr = (double)(0); + return; + } + + /* + * calculate real part + */ + mx = (double)(0); + for(i=0; i<=n-1; i++) + { + v = a->ptr.p_complex[i].x*b->ptr.p_complex[i].x; + temp->ptr.p_double[2*i+0] = v; + mx = ae_maxreal(mx, ae_fabs(v, _state), _state); + v = -a->ptr.p_complex[i].y*b->ptr.p_complex[i].y; + temp->ptr.p_double[2*i+1] = v; + mx = ae_maxreal(mx, ae_fabs(v, _state), _state); + } + if( ae_fp_eq(mx,(double)(0)) ) + { + r->x = (double)(0); + rerrx = (double)(0); + } + else + { + xblas_xsum(temp, mx, 2*n, &r->x, &rerrx, _state); + } + + /* + * calculate imaginary part + */ + mx = (double)(0); + for(i=0; i<=n-1; i++) + { + v = a->ptr.p_complex[i].x*b->ptr.p_complex[i].y; + temp->ptr.p_double[2*i+0] = v; + mx = ae_maxreal(mx, ae_fabs(v, _state), _state); + v = a->ptr.p_complex[i].y*b->ptr.p_complex[i].x; + temp->ptr.p_double[2*i+1] = v; + mx = ae_maxreal(mx, ae_fabs(v, _state), _state); + } + if( ae_fp_eq(mx,(double)(0)) ) + { + r->y = (double)(0); + rerry = (double)(0); + } + else + { + xblas_xsum(temp, mx, 2*n, &r->y, &rerry, _state); + } + + /* + * total error + */ + if( ae_fp_eq(rerrx,(double)(0))&&ae_fp_eq(rerry,(double)(0)) ) + { + *rerr = (double)(0); + } + else + { + *rerr = ae_maxreal(rerrx, rerry, _state)*ae_sqrt(1+ae_sqr(ae_minreal(rerrx, rerry, _state)/ae_maxreal(rerrx, rerry, _state), _state), _state); } } /************************************************************************* -These functions perform Armijo line search using at most FMAX function -evaluations. It doesn't enforce some kind of " sufficient decrease" -criterion - it just tries different Armijo steps and returns optimum found -so far. - -Optimization is done using F-rcomm interface: -* ArmijoCreate initializes State structure - (reusing previously allocated buffers) -* ArmijoIteration is subsequently called -* ArmijoResults returns results +Internal subroutine for extra-precise calculation of SUM(w[i]). INPUT PARAMETERS: - N - problem size - X - array[N], starting point - F - F(X+S*STP) - S - step direction, S>0 - STP - step length - STPMAX - maximum value for STP or zero (if no limit is imposed) - FMAX - maximum number of function evaluations - State - optimization state + W - array[0..N-1], values to be added + W is modified during calculations. 
+ MX - max(W[i]) + N - array size + +OUTPUT PARAMETERS: + R - SUM(w[i]) + RErr- error estimate for R -- ALGLIB -- - Copyright 05.10.2010 by Bochkanov Sergey + Copyright 24.08.2009 by Bochkanov Sergey *************************************************************************/ -void armijocreate(ae_int_t n, - /* Real */ ae_vector* x, - double f, - /* Real */ ae_vector* s, - double stp, - double stpmax, - ae_int_t fmax, - armijostate* state, +static void xblas_xsum(/* Real */ ae_vector* w, + double mx, + ae_int_t n, + double* r, + double* rerr, ae_state *_state) { + ae_int_t i; + ae_int_t k; + ae_int_t ks; + double v; + double s; + double ln2; + double chunk; + double invchunk; + ae_bool allzeros; + *r = 0; + *rerr = 0; - if( state->x.cntx, n, _state); + *r = (double)(0); + *rerr = (double)(0); + return; } - if( state->xbase.cntxbase, n, _state); + *r = (double)(0); + *rerr = (double)(0); + return; } - if( state->s.cnts, n, _state); + + /* + * Overflow or underflow during evaluation of S; fallback low-precision code + */ + *r = (double)(0); + *rerr = mx*ae_machineepsilon; + for(i=0; i<=n-1; i++) + { + *r = *r+w->ptr.p_double[i]; + } + return; } - state->stpmax = stpmax; - state->fmax = fmax; - state->stplen = stp; - state->fcur = f; - state->n = n; - ae_v_move(&state->xbase.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); - ae_v_move(&state->s.ptr.p_double[0], 1, &s->ptr.p_double[0], 1, ae_v_len(0,n-1)); - ae_vector_set_length(&state->rstate.ia, 0+1, _state); - ae_vector_set_length(&state->rstate.ra, 0+1, _state); - state->rstate.stage = -1; + while(ae_fp_greater_eq(s*mx,(double)(1))) + { + s = 0.5*s; + } + while(ae_fp_less(s*mx,0.5)) + { + s = 2*s; + } + ae_v_muld(&w->ptr.p_double[0], 1, ae_v_len(0,n-1), s); + s = 1/s; + + /* + * find Chunk=2^M such that N*Chunk<2^29 + * + * we have chosen upper limit (2^29) with enough space left + * to tolerate possible problems with rounding and N's close + * to the limit, so we don't want to be very strict here. + */ + k = ae_trunc(ae_log((double)536870912/(double)n, _state)/ln2, _state); + chunk = xblas_xfastpow((double)(2), k, _state); + if( ae_fp_less(chunk,(double)(2)) ) + { + chunk = (double)(2); + } + invchunk = 1/chunk; + + /* + * calculate result + */ + *r = (double)(0); + ae_v_muld(&w->ptr.p_double[0], 1, ae_v_len(0,n-1), chunk); + for(;;) + { + s = s*invchunk; + allzeros = ae_true; + ks = 0; + for(i=0; i<=n-1; i++) + { + v = w->ptr.p_double[i]; + k = ae_trunc(v, _state); + if( ae_fp_neq(v,(double)(k)) ) + { + allzeros = ae_false; + } + w->ptr.p_double[i] = chunk*(v-k); + ks = ks+k; + } + *r = *r+s*ks; + v = ae_fabs(*r, _state); + if( allzeros||ae_fp_eq(s*n+mx,mx) ) + { + break; + } + } + + /* + * correct error + */ + *rerr = ae_maxreal(*rerr, ae_fabs(*r, _state)*ae_machineepsilon, _state); } /************************************************************************* -This is rcomm-based search function +Fast Pow -- ALGLIB -- - Copyright 05.10.2010 by Bochkanov Sergey + Copyright 24.08.2009 by Bochkanov Sergey *************************************************************************/ -ae_bool armijoiteration(armijostate* state, ae_state *_state) +static double xblas_xfastpow(double r, ae_int_t n, ae_state *_state) { - double v; - ae_int_t n; - ae_bool result; + double result; - - /* - * Reverse communication preparations - * I know it looks ugly, but it works the same way - * anywhere from C++ to Python. 
- * - * This code initializes locals by: - * * random values determined during code - * generation - on first subroutine call - * * values from previous call - on subsequent calls - */ - if( state->rstate.stage>=0 ) - { - n = state->rstate.ia.ptr.p_int[0]; - v = state->rstate.ra.ptr.p_double[0]; - } - else - { - n = -983; - v = -989; - } - if( state->rstate.stage==0 ) - { - goto lbl_0; - } - if( state->rstate.stage==1 ) + result = (double)(0); + if( n>0 ) { - goto lbl_1; + if( n%2==0 ) + { + result = ae_sqr(xblas_xfastpow(r, n/2, _state), _state); + } + else + { + result = r*xblas_xfastpow(r, n-1, _state); + } + return result; } - if( state->rstate.stage==2 ) + if( n==0 ) { - goto lbl_2; + result = (double)(1); } - if( state->rstate.stage==3 ) + if( n<0 ) { - goto lbl_3; + result = xblas_xfastpow(1/r, -n, _state); } + return result; +} + + +#endif +#if defined(AE_COMPILE_BASICSTATOPS) || !defined(AE_PARTIAL_BUILD) + + +/************************************************************************* +Internal tied ranking subroutine. + +INPUT PARAMETERS: + X - array to rank + N - array size + IsCentered- whether ranks are centered or not: + * True - ranks are centered in such way that their + sum is zero + * False - ranks are not centered + Buf - temporary buffers + +NOTE: when IsCentered is True and all X[] are equal, this function fills + X by zeros (exact zeros are used, not sum which is only approximately + equal to zero). +*************************************************************************/ +void rankx(/* Real */ ae_vector* x, + ae_int_t n, + ae_bool iscentered, + apbuffers* buf, + ae_state *_state) +{ + ae_int_t i; + ae_int_t j; + ae_int_t k; + double tmp; + double voffs; + + /* - * Routine body + * Prepare */ - if( (ae_fp_less_eq(state->stplen,(double)(0))||ae_fp_less(state->stpmax,(double)(0)))||state->fmax<2 ) - { - state->info = 0; - result = ae_false; - return result; - } - if( ae_fp_less_eq(state->stplen,linmin_stpmin) ) + if( n<1 ) { - state->info = 4; - result = ae_false; - return result; + return; } - n = state->n; - state->nfev = 0; - - /* - * We always need F - */ - state->needf = ae_true; - - /* - * Bound StpLen - */ - if( ae_fp_greater(state->stplen,state->stpmax)&&ae_fp_neq(state->stpmax,(double)(0)) ) + if( n==1 ) { - state->stplen = state->stpmax; + x->ptr.p_double[0] = (double)(0); + return; } - - /* - * Increase length - */ - v = state->stplen*linmin_armijofactor; - if( ae_fp_greater(v,state->stpmax)&&ae_fp_neq(state->stpmax,(double)(0)) ) + if( buf->ra1.cntstpmax; + ae_vector_set_length(&buf->ra1, n, _state); } - ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); - ae_v_addd(&state->x.ptr.p_double[0], 1, &state->s.ptr.p_double[0], 1, ae_v_len(0,n-1), v); - state->rstate.stage = 0; - goto lbl_rcomm; -lbl_0: - state->nfev = state->nfev+1; - if( ae_fp_greater_eq(state->f,state->fcur) ) + if( buf->ia1.cntia1, n, _state); } - state->stplen = v; - state->fcur = state->f; -lbl_6: - if( ae_false ) + for(i=0; i<=n-1; i++) { - goto lbl_7; + buf->ra1.ptr.p_double[i] = x->ptr.p_double[i]; + buf->ia1.ptr.p_int[i] = i; } + tagsortfasti(&buf->ra1, &buf->ia1, &buf->ra2, &buf->ia2, n, _state); /* - * test stopping conditions + * Special test for all values being equal */ - if( state->nfev>=state->fmax ) - { - state->info = 3; - result = ae_false; - return result; - } - if( ae_fp_greater_eq(state->stplen,state->stpmax) ) + if( ae_fp_eq(buf->ra1.ptr.p_double[0],buf->ra1.ptr.p_double[n-1]) ) { - state->info = 5; - result = ae_false; - return 
result; + if( iscentered ) + { + tmp = 0.0; + } + else + { + tmp = (double)(n-1)/(double)2; + } + for(i=0; i<=n-1; i++) + { + x->ptr.p_double[i] = tmp; + } + return; } /* - * evaluate F + * compute tied ranks */ - v = state->stplen*linmin_armijofactor; - if( ae_fp_greater(v,state->stpmax)&&ae_fp_neq(state->stpmax,(double)(0)) ) + i = 0; + while(i<=n-1) { - v = state->stpmax; + j = i+1; + while(j<=n-1) + { + if( ae_fp_neq(buf->ra1.ptr.p_double[j],buf->ra1.ptr.p_double[i]) ) + { + break; + } + j = j+1; + } + for(k=i; k<=j-1; k++) + { + buf->ra1.ptr.p_double[k] = (double)(i+j-1)/(double)2; + } + i = j; } - ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); - ae_v_addd(&state->x.ptr.p_double[0], 1, &state->s.ptr.p_double[0], 1, ae_v_len(0,n-1), v); - state->rstate.stage = 1; - goto lbl_rcomm; -lbl_1: - state->nfev = state->nfev+1; /* - * make decision + * back to x */ - if( ae_fp_less(state->f,state->fcur) ) + if( iscentered ) { - state->stplen = v; - state->fcur = state->f; + voffs = (double)(n-1)/(double)2; } else { - state->info = 1; - result = ae_false; - return result; + voffs = 0.0; } - goto lbl_6; -lbl_7: -lbl_4: + for(i=0; i<=n-1; i++) + { + x->ptr.p_double[buf->ia1.ptr.p_int[i]] = buf->ra1.ptr.p_double[i]-voffs; + } +} + + +/************************************************************************* +Internal untied ranking subroutine. + +INPUT PARAMETERS: + X - array to rank + N - array size + Buf - temporary buffers + +Returns untied ranks (in case of a tie ranks are resolved arbitrarily). +*************************************************************************/ +void rankxuntied(/* Real */ ae_vector* x, + ae_int_t n, + apbuffers* buf, + ae_state *_state) +{ + ae_int_t i; + + /* - * Decrease length + * Prepare */ - v = state->stplen/linmin_armijofactor; - ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); - ae_v_addd(&state->x.ptr.p_double[0], 1, &state->s.ptr.p_double[0], 1, ae_v_len(0,n-1), v); - state->rstate.stage = 2; - goto lbl_rcomm; -lbl_2: - state->nfev = state->nfev+1; - if( ae_fp_greater_eq(state->f,state->fcur) ) + if( n<1 ) { - goto lbl_8; + return; } - state->stplen = state->stplen/linmin_armijofactor; - state->fcur = state->f; -lbl_10: - if( ae_false ) + if( n==1 ) { - goto lbl_11; + x->ptr.p_double[0] = (double)(0); + return; } - - /* - * test stopping conditions - */ - if( state->nfev>=state->fmax ) + if( buf->ra1.cntinfo = 3; - result = ae_false; - return result; + ae_vector_set_length(&buf->ra1, n, _state); } - if( ae_fp_less_eq(state->stplen,linmin_stpmin) ) + if( buf->ia1.cntinfo = 4; - result = ae_false; - return result; + ae_vector_set_length(&buf->ia1, n, _state); } - - /* - * evaluate F - */ - v = state->stplen/linmin_armijofactor; - ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); - ae_v_addd(&state->x.ptr.p_double[0], 1, &state->s.ptr.p_double[0], 1, ae_v_len(0,n-1), v); - state->rstate.stage = 3; - goto lbl_rcomm; -lbl_3: - state->nfev = state->nfev+1; - - /* - * make decision - */ - if( ae_fp_less(state->f,state->fcur) ) + for(i=0; i<=n-1; i++) { - state->stplen = state->stplen/linmin_armijofactor; - state->fcur = state->f; + buf->ra1.ptr.p_double[i] = x->ptr.p_double[i]; + buf->ia1.ptr.p_int[i] = i; } - else + tagsortfasti(&buf->ra1, &buf->ia1, &buf->ra2, &buf->ia2, n, _state); + for(i=0; i<=n-1; i++) { - state->info = 1; - result = ae_false; - return result; + x->ptr.p_double[buf->ia1.ptr.p_int[i]] = (double)(i); } - goto lbl_10; 
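The tied-ranking routine above overwrites X with averaged 0-based ranks and, when IsCentered is true, shifts every rank by -(N-1)/2 so that the ranks sum to zero. A short sketch of a call (illustration only, not part of the upstream sources; demo_rankx is a hypothetical name):

#include <string.h>
#include "alglibinternal.h"

static void demo_rankx(ae_state *_state)
{
    ae_frame _frame_block;
    ae_vector x;
    apbuffers buf;
    ae_int_t i;
    double data[4] = { 3.0, 1.0, 3.0, 2.0 };

    ae_frame_make(_state, &_frame_block);
    memset(&x, 0, sizeof(x));
    memset(&buf, 0, sizeof(buf));
    ae_vector_init(&x, 4, DT_REAL, _state, ae_true);
    _apbuffers_init(&buf, _state, ae_true);
    for(i=0; i<=3; i++)
    {
        x.ptr.p_double[i] = data[i];
    }
    rankx(&x, 4, ae_false, &buf, _state);
    /* x is now {2.5, 0.0, 2.5, 1.0}: the tied 3's share the averaged rank 2.5   */
    /* with IsCentered=true every rank would additionally be shifted by -1.5     */
    ae_frame_leave(_state);
}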
-lbl_11: -lbl_8: - - /* - * Nothing to be done - */ - state->info = 1; - result = ae_false; - return result; - - /* - * Saving state - */ -lbl_rcomm: - result = ae_true; - state->rstate.ia.ptr.p_int[0] = n; - state->rstate.ra.ptr.p_double[0] = v; - return result; } -/************************************************************************* -Results of Armijo search - -OUTPUT PARAMETERS: - INFO - on output it is set to one of the return codes: - * 0 improper input params - * 1 optimum step is found with at most FMAX evaluations - * 3 FMAX evaluations were used, - X contains optimum found so far - * 4 step is at lower bound STPMIN - * 5 step is at upper bound - STP - step length (in case of failure it is still returned) - F - function value (in case of failure it is still returned) - - -- ALGLIB -- - Copyright 05.10.2010 by Bochkanov Sergey -*************************************************************************/ -void armijoresults(armijostate* state, - ae_int_t* info, - double* stp, - double* f, - ae_state *_state) -{ +#endif +#if defined(AE_COMPILE_HPCCORES) || !defined(AE_PARTIAL_BUILD) - *info = state->info; - *stp = state->stplen; - *f = state->fcur; -} +/************************************************************************* +Prepares HPC compuations of chunked gradient with HPCChunkedGradient(). +You have to call this function before calling HPCChunkedGradient() for +a new set of weights. You have to call it only once, see example below: +HOW TO PROCESS DATASET WITH THIS FUNCTION: + Grad:=0 + HPCPrepareChunkedGradient(Weights, WCount, NTotal, NOut, Buf) + foreach chunk-of-dataset do + HPCChunkedGradient(...) + HPCFinalizeChunkedGradient(Buf, Grad) -static void linmin_mcstep(double* stx, - double* fx, - double* dx, - double* sty, - double* fy, - double* dy, - double* stp, - double fp, - double dp, - ae_bool* brackt, - double stmin, - double stmax, - ae_int_t* info, +*************************************************************************/ +void hpcpreparechunkedgradient(/* Real */ ae_vector* weights, + ae_int_t wcount, + ae_int_t ntotal, + ae_int_t nin, + ae_int_t nout, + mlpbuffers* buf, ae_state *_state) { - ae_bool bound; - double gamma; - double p; - double q; - double r; - double s; - double sgnd; - double stpc; - double stpf; - double stpq; - double theta; + ae_int_t i; + ae_int_t batch4size; + ae_int_t chunksize; - *info = 0; - - /* - * CHECK THE INPUT PARAMETERS FOR ERRORS. - */ - if( ((*brackt&&(ae_fp_less_eq(*stp,ae_minreal(*stx, *sty, _state))||ae_fp_greater_eq(*stp,ae_maxreal(*stx, *sty, _state))))||ae_fp_greater_eq(*dx*(*stp-(*stx)),(double)(0)))||ae_fp_less(stmax,stmin) ) + chunksize = 4; + batch4size = 3*chunksize*ntotal+chunksize*(2*nout+1); + if( buf->xy.rowsxy.colsxy, chunksize, nin+nout, _state); } - - /* - * DETERMINE IF THE DERIVATIVES HAVE OPPOSITE SIGN. - */ - sgnd = dp*(*dx/ae_fabs(*dx, _state)); - - /* - * FIRST CASE. A HIGHER FUNCTION VALUE. - * THE MINIMUM IS BRACKETED. IF THE CUBIC STEP IS CLOSER - * TO STX THAN THE QUADRATIC STEP, THE CUBIC STEP IS TAKEN, - * ELSE THE AVERAGE OF THE CUBIC AND QUADRATIC STEPS IS TAKEN. - */ - if( ae_fp_greater(fp,*fx) ) + if( buf->xy2.rowsxy2.colsxy2, chunksize, nin+nout, _state); } - else + if( buf->xyrow.cntxyrow, nin+nout, _state); + } + if( buf->x.cntx, nin, _state); + } + if( buf->y.cnty, nout, _state); + } + if( buf->desiredy.cntdesiredy, nout, _state); } - - /* - * UPDATE THE INTERVAL OF UNCERTAINTY. THIS UPDATE DOES NOT - * DEPEND ON THE NEW STEP OR THE CASE ANALYSIS ABOVE. 
- */ - if( ae_fp_greater(fp,*fx) ) + if( buf->batch4buf.cntbatch4buf, batch4size, _state); } - else + if( buf->hpcbuf.cnthpcbuf, wcount, _state); } - - /* - * COMPUTE THE NEW STEP AND SAFEGUARD IT. - */ - stpf = ae_minreal(stmax, stpf, _state); - stpf = ae_maxreal(stmin, stpf, _state); - *stp = stpf; - if( *brackt&&bound ) + if( buf->g.cntg, wcount, _state); + } + if( !hpccores_hpcpreparechunkedgradientx(weights, wcount, &buf->hpcbuf, _state) ) + { + for(i=0; i<=wcount-1; i++) { - *stp = ae_minreal(*stx+0.66*(*sty-(*stx)), *stp, _state); + buf->hpcbuf.ptr.p_double[i] = 0.0; } - else + } + buf->wcount = wcount; + buf->ntotal = ntotal; + buf->nin = nin; + buf->nout = nout; + buf->chunksize = chunksize; +} + + +/************************************************************************* +Finalizes HPC compuations of chunked gradient with HPCChunkedGradient(). +You have to call this function after calling HPCChunkedGradient() for +a new set of weights. You have to call it only once, see example below: + +HOW TO PROCESS DATASET WITH THIS FUNCTION: + Grad:=0 + HPCPrepareChunkedGradient(Weights, WCount, NTotal, NOut, Buf) + foreach chunk-of-dataset do + HPCChunkedGradient(...) + HPCFinalizeChunkedGradient(Buf, Grad) + +*************************************************************************/ +void hpcfinalizechunkedgradient(mlpbuffers* buf, + /* Real */ ae_vector* grad, + ae_state *_state) +{ + ae_int_t i; + + + if( !hpccores_hpcfinalizechunkedgradientx(&buf->hpcbuf, buf->wcount, grad, _state) ) + { + for(i=0; i<=buf->wcount-1; i++) { - *stp = ae_maxreal(*stx+0.66*(*sty-(*stx)), *stp, _state); + grad->ptr.p_double[i] = grad->ptr.p_double[i]+buf->hpcbuf.ptr.p_double[i]; } } } -void _linminstate_init(void* _p, ae_state *_state) +/************************************************************************* +Fast kernel for chunked gradient. + +*************************************************************************/ +ae_bool hpcchunkedgradient(/* Real */ ae_vector* weights, + /* Integer */ ae_vector* structinfo, + /* Real */ ae_vector* columnmeans, + /* Real */ ae_vector* columnsigmas, + /* Real */ ae_matrix* xy, + ae_int_t cstart, + ae_int_t csize, + /* Real */ ae_vector* batch4buf, + /* Real */ ae_vector* hpcbuf, + double* e, + ae_bool naturalerrorfunc, + ae_state *_state) { - linminstate *p = (linminstate*)_p; - ae_touch_ptr((void*)p); +#ifndef ALGLIB_INTERCEPTS_SSE2 + ae_bool result; + + + result = ae_false; + return result; +#else + return _ialglib_i_hpcchunkedgradient(weights, structinfo, columnmeans, columnsigmas, xy, cstart, csize, batch4buf, hpcbuf, e, naturalerrorfunc); +#endif } -void _linminstate_init_copy(void* _dst, void* _src, ae_state *_state) +/************************************************************************* +Fast kernel for chunked processing. 
+ +*************************************************************************/ +ae_bool hpcchunkedprocess(/* Real */ ae_vector* weights, + /* Integer */ ae_vector* structinfo, + /* Real */ ae_vector* columnmeans, + /* Real */ ae_vector* columnsigmas, + /* Real */ ae_matrix* xy, + ae_int_t cstart, + ae_int_t csize, + /* Real */ ae_vector* batch4buf, + /* Real */ ae_vector* hpcbuf, + ae_state *_state) { - linminstate *dst = (linminstate*)_dst; - linminstate *src = (linminstate*)_src; - dst->brackt = src->brackt; - dst->stage1 = src->stage1; - dst->infoc = src->infoc; - dst->dg = src->dg; - dst->dgm = src->dgm; - dst->dginit = src->dginit; - dst->dgtest = src->dgtest; - dst->dgx = src->dgx; - dst->dgxm = src->dgxm; - dst->dgy = src->dgy; - dst->dgym = src->dgym; - dst->finit = src->finit; - dst->ftest1 = src->ftest1; - dst->fm = src->fm; - dst->fx = src->fx; - dst->fxm = src->fxm; - dst->fy = src->fy; - dst->fym = src->fym; - dst->stx = src->stx; - dst->sty = src->sty; - dst->stmin = src->stmin; - dst->stmax = src->stmax; - dst->width = src->width; - dst->width1 = src->width1; - dst->xtrapf = src->xtrapf; +#ifndef ALGLIB_INTERCEPTS_SSE2 + ae_bool result; + + + result = ae_false; + return result; +#else + return _ialglib_i_hpcchunkedprocess(weights, structinfo, columnmeans, columnsigmas, xy, cstart, csize, batch4buf, hpcbuf); +#endif } -void _linminstate_clear(void* _p) +/************************************************************************* +Stub function. + + -- ALGLIB routine -- + 14.06.2013 + Bochkanov Sergey +*************************************************************************/ +static ae_bool hpccores_hpcpreparechunkedgradientx(/* Real */ ae_vector* weights, + ae_int_t wcount, + /* Real */ ae_vector* hpcbuf, + ae_state *_state) { - linminstate *p = (linminstate*)_p; - ae_touch_ptr((void*)p); +#ifndef ALGLIB_INTERCEPTS_SSE2 + ae_bool result; + + + result = ae_false; + return result; +#else + return _ialglib_i_hpcpreparechunkedgradientx(weights, wcount, hpcbuf); +#endif } -void _linminstate_destroy(void* _p) +/************************************************************************* +Stub function. 
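The prepare/chunk/finalize protocol documented above maps onto a simple driver loop. The sketch below (illustration only, not part of the upstream sources; demo_chunked_gradient is a hypothetical name) assumes the caller, normally the MLP gradient code, owns the network arrays and routes both any partial tail chunk and the case where the SSE2 kernel is unavailable through its generic fallback path:

#include "alglibinternal.h"

static void demo_chunked_gradient(/* Real    */ ae_vector* weights,
     ae_int_t wcount,
     /* Integer */ ae_vector* structinfo,
     /* Real    */ ae_vector* columnmeans,
     /* Real    */ ae_vector* columnsigmas,
     /* Real    */ ae_matrix* xy,
     ae_int_t npoints,
     ae_int_t nin,
     ae_int_t nout,
     mlpbuffers* buf,
     /* Real    */ ae_vector* grad,
     double* e,
     ae_state *_state)
{
    ae_int_t cstart;
    ae_int_t i;

    /* Grad := 0, as required by the protocol above */
    for(i=0; i<=wcount-1; i++)
    {
        grad->ptr.p_double[i] = 0.0;
    }
    *e = 0.0;
    hpcpreparechunkedgradient(weights, wcount, npoints, nin, nout, buf, _state);
    for(cstart=0; cstart+buf->chunksize<=npoints; cstart+=buf->chunksize)
    {
        if( !hpcchunkedgradient(weights, structinfo, columnmeans, columnsigmas,
                xy, cstart, buf->chunksize, &buf->batch4buf, &buf->hpcbuf,
                e, ae_false, _state) )
        {
            /* SSE2 kernel not compiled in: process this chunk with the
               caller's generic code instead (omitted in this sketch) */
        }
    }
    /* any remaining rows (npoints % chunksize) also go through the generic path */
    hpcfinalizechunkedgradient(buf, grad, _state);
}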
+ + -- ALGLIB routine -- + 14.06.2013 + Bochkanov Sergey +*************************************************************************/ +static ae_bool hpccores_hpcfinalizechunkedgradientx(/* Real */ ae_vector* buf, + ae_int_t wcount, + /* Real */ ae_vector* grad, + ae_state *_state) { - linminstate *p = (linminstate*)_p; - ae_touch_ptr((void*)p); +#ifndef ALGLIB_INTERCEPTS_SSE2 + ae_bool result; + + + result = ae_false; + return result; +#else + return _ialglib_i_hpcfinalizechunkedgradientx(buf, wcount, grad); +#endif } -void _armijostate_init(void* _p, ae_state *_state) +void _mlpbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic) { - armijostate *p = (armijostate*)_p; + mlpbuffers *p = (mlpbuffers*)_p; ae_touch_ptr((void*)p); - ae_vector_init(&p->x, 0, DT_REAL, _state); - ae_vector_init(&p->xbase, 0, DT_REAL, _state); - ae_vector_init(&p->s, 0, DT_REAL, _state); - _rcommstate_init(&p->rstate, _state); + ae_vector_init(&p->batch4buf, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->hpcbuf, 0, DT_REAL, _state, make_automatic); + ae_matrix_init(&p->xy, 0, 0, DT_REAL, _state, make_automatic); + ae_matrix_init(&p->xy2, 0, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->xyrow, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->y, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->desiredy, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic); } -void _armijostate_init_copy(void* _dst, void* _src, ae_state *_state) +void _mlpbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { - armijostate *dst = (armijostate*)_dst; - armijostate *src = (armijostate*)_src; - dst->needf = src->needf; - ae_vector_init_copy(&dst->x, &src->x, _state); - dst->f = src->f; - dst->n = src->n; - ae_vector_init_copy(&dst->xbase, &src->xbase, _state); - ae_vector_init_copy(&dst->s, &src->s, _state); - dst->stplen = src->stplen; - dst->fcur = src->fcur; - dst->stpmax = src->stpmax; - dst->fmax = src->fmax; - dst->nfev = src->nfev; - dst->info = src->info; - _rcommstate_init_copy(&dst->rstate, &src->rstate, _state); + mlpbuffers *dst = (mlpbuffers*)_dst; + mlpbuffers *src = (mlpbuffers*)_src; + dst->chunksize = src->chunksize; + dst->ntotal = src->ntotal; + dst->nin = src->nin; + dst->nout = src->nout; + dst->wcount = src->wcount; + ae_vector_init_copy(&dst->batch4buf, &src->batch4buf, _state, make_automatic); + ae_vector_init_copy(&dst->hpcbuf, &src->hpcbuf, _state, make_automatic); + ae_matrix_init_copy(&dst->xy, &src->xy, _state, make_automatic); + ae_matrix_init_copy(&dst->xy2, &src->xy2, _state, make_automatic); + ae_vector_init_copy(&dst->xyrow, &src->xyrow, _state, make_automatic); + ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); + ae_vector_init_copy(&dst->y, &src->y, _state, make_automatic); + ae_vector_init_copy(&dst->desiredy, &src->desiredy, _state, make_automatic); + dst->e = src->e; + ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic); + ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic); } -void _armijostate_clear(void* _p) +void _mlpbuffers_clear(void* _p) { - armijostate *p = (armijostate*)_p; + mlpbuffers *p = (mlpbuffers*)_p; ae_touch_ptr((void*)p); + ae_vector_clear(&p->batch4buf); + ae_vector_clear(&p->hpcbuf); + ae_matrix_clear(&p->xy); + ae_matrix_clear(&p->xy2); + ae_vector_clear(&p->xyrow); ae_vector_clear(&p->x); 
- ae_vector_clear(&p->xbase); - ae_vector_clear(&p->s); - _rcommstate_clear(&p->rstate); + ae_vector_clear(&p->y); + ae_vector_clear(&p->desiredy); + ae_vector_clear(&p->g); + ae_vector_clear(&p->tmp0); } -void _armijostate_destroy(void* _p) +void _mlpbuffers_destroy(void* _p) { - armijostate *p = (armijostate*)_p; + mlpbuffers *p = (mlpbuffers*)_p; ae_touch_ptr((void*)p); + ae_vector_destroy(&p->batch4buf); + ae_vector_destroy(&p->hpcbuf); + ae_matrix_destroy(&p->xy); + ae_matrix_destroy(&p->xy2); + ae_vector_destroy(&p->xyrow); ae_vector_destroy(&p->x); - ae_vector_destroy(&p->xbase); - ae_vector_destroy(&p->s); - _rcommstate_destroy(&p->rstate); + ae_vector_destroy(&p->y); + ae_vector_destroy(&p->desiredy); + ae_vector_destroy(&p->g); + ae_vector_destroy(&p->tmp0); } +#endif +#if defined(AE_COMPILE_NTHEORY) || !defined(AE_PARTIAL_BUILD) void findprimitiverootandinverse(ae_int_t n, @@ -13628,6 +13948,8 @@ } +#endif +#if defined(AE_COMPILE_FTBASE) || !defined(AE_PARTIAL_BUILD) /************************************************************************* @@ -13658,8 +13980,9 @@ ae_int_t precisize; ae_frame_make(_state, &_frame_block); + memset(&bluesteinbuf, 0, sizeof(bluesteinbuf)); _fasttransformplan_clear(plan); - _srealarray_init(&bluesteinbuf, _state); + _srealarray_init(&bluesteinbuf, _state, ae_true); /* @@ -14064,7 +14387,8 @@ ae_int_t row3; ae_frame_make(_state, &_frame_block); - _srealarray_init(&localbuf, _state); + memset(&localbuf, 0, sizeof(localbuf)); + _srealarray_init(&localbuf, _state, ae_true); ae_assert(n>0, "FTComplexFFTPlan: N<=0", _state); ae_assert(k>0, "FTComplexFFTPlan: K<=0", _state); @@ -14516,10 +14840,14 @@ ae_smart_ptr _bufd; ae_frame_make(_state, &_frame_block); - ae_smart_ptr_init(&_bufa, (void**)&bufa, _state); - ae_smart_ptr_init(&_bufb, (void**)&bufb, _state); - ae_smart_ptr_init(&_bufc, (void**)&bufc, _state); - ae_smart_ptr_init(&_bufd, (void**)&bufd, _state); + memset(&_bufa, 0, sizeof(_bufa)); + memset(&_bufb, 0, sizeof(_bufb)); + memset(&_bufc, 0, sizeof(_bufc)); + memset(&_bufd, 0, sizeof(_bufd)); + ae_smart_ptr_init(&_bufa, (void**)&bufa, _state, ae_true); + ae_smart_ptr_init(&_bufb, (void**)&bufb, _state, ae_true); + ae_smart_ptr_init(&_bufc, (void**)&bufc, _state, ae_true); + ae_smart_ptr_init(&_bufd, (void**)&bufd, _state, ae_true); ae_assert(plan->entries.ptr.pp_int[subplan][ftbase_coltype]==ftbase_opstart, "FTApplySubPlan: incorrect subplan header", _state); rowidx = subplan+1; @@ -15651,7 +15979,8 @@ fasttransformplan plan; ae_frame_make(_state, &_frame_block); - _fasttransformplan_init(&plan, _state); + memset(&plan, 0, sizeof(plan)); + _fasttransformplan_init(&plan, _state, ae_true); /* @@ -15850,7 +16179,8 @@ double v; ae_frame_make(_state, &_frame_block); - _fasttransformplan_init(&plan, _state); + memset(&plan, 0, sizeof(plan)); + _fasttransformplan_init(&plan, _state, ae_true); /* @@ -16562,27 +16892,27 @@ } -void _fasttransformplan_init(void* _p, ae_state *_state) +void _fasttransformplan_init(void* _p, ae_state *_state, ae_bool make_automatic) { fasttransformplan *p = (fasttransformplan*)_p; ae_touch_ptr((void*)p); - ae_matrix_init(&p->entries, 0, 0, DT_INT, _state); - ae_vector_init(&p->buffer, 0, DT_REAL, _state); - ae_vector_init(&p->precr, 0, DT_REAL, _state); - ae_vector_init(&p->preci, 0, DT_REAL, _state); - ae_shared_pool_init(&p->bluesteinpool, _state); + ae_matrix_init(&p->entries, 0, 0, DT_INT, _state, make_automatic); + ae_vector_init(&p->buffer, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->precr, 0, DT_REAL, 
_state, make_automatic); + ae_vector_init(&p->preci, 0, DT_REAL, _state, make_automatic); + ae_shared_pool_init(&p->bluesteinpool, _state, make_automatic); } -void _fasttransformplan_init_copy(void* _dst, void* _src, ae_state *_state) +void _fasttransformplan_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { fasttransformplan *dst = (fasttransformplan*)_dst; fasttransformplan *src = (fasttransformplan*)_src; - ae_matrix_init_copy(&dst->entries, &src->entries, _state); - ae_vector_init_copy(&dst->buffer, &src->buffer, _state); - ae_vector_init_copy(&dst->precr, &src->precr, _state); - ae_vector_init_copy(&dst->preci, &src->preci, _state); - ae_shared_pool_init_copy(&dst->bluesteinpool, &src->bluesteinpool, _state); + ae_matrix_init_copy(&dst->entries, &src->entries, _state, make_automatic); + ae_vector_init_copy(&dst->buffer, &src->buffer, _state, make_automatic); + ae_vector_init_copy(&dst->precr, &src->precr, _state, make_automatic); + ae_vector_init_copy(&dst->preci, &src->preci, _state, make_automatic); + ae_shared_pool_init_copy(&dst->bluesteinpool, &src->bluesteinpool, _state, make_automatic); } @@ -16610,6 +16940,8 @@ } +#endif +#if defined(AE_COMPILE_NEARUNITYUNIT) || !defined(AE_PARTIAL_BUILD) double nulog1p(double x, ae_state *_state) @@ -16701,8 +17033,11 @@ } +#endif +#if defined(AE_COMPILE_ALGLIBBASICS) || !defined(AE_PARTIAL_BUILD) +#endif } diff -Nru alglib-3.10.0/src/alglibinternal.h alglib-3.16.0/src/alglibinternal.h --- alglib-3.10.0/src/alglibinternal.h 2015-08-19 12:24:21.000000000 +0000 +++ alglib-3.16.0/src/alglibinternal.h 2019-12-19 10:28:27.000000000 +0000 @@ -1,5 +1,5 @@ /************************************************************************* -ALGLIB 3.10.0 (source code generated 2015-08-19) +ALGLIB 3.16.0 (source code generated 2019-12-19) Copyright (c) Sergey Bochkanov (ALGLIB project). 
>>> SOURCE LICENSE >>> @@ -29,6 +29,9 @@ ///////////////////////////////////////////////////////////////////////// namespace alglib_impl { +#if defined(AE_COMPILE_SCODES) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_APSERV) || !defined(AE_PARTIAL_BUILD) typedef struct { ae_vector ba0; @@ -75,25 +78,28 @@ { ae_vector val; } scomplexarray; -typedef struct -{ - ae_int_t chunksize; - ae_int_t ntotal; - ae_int_t nin; - ae_int_t nout; - ae_int_t wcount; - ae_vector batch4buf; - ae_vector hpcbuf; - ae_matrix xy; - ae_matrix xy2; - ae_vector xyrow; - ae_vector x; - ae_vector y; - ae_vector desiredy; - double e; - ae_vector g; - ae_vector tmp0; -} mlpbuffers; +#endif +#if defined(AE_COMPILE_TSORT) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_ABLASMKL) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_ABLASF) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_CREFLECTIONS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_ROTATIONS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_TRLINSOLVE) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_SAFESOLVE) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_HBLAS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_SBLAS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_BLAS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_LINMIN) || !defined(AE_PARTIAL_BUILD) typedef struct { ae_bool brackt; @@ -138,6 +144,35 @@ ae_int_t info; rcommstate rstate; } armijostate; +#endif +#if defined(AE_COMPILE_XBLAS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_BASICSTATOPS) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_HPCCORES) || !defined(AE_PARTIAL_BUILD) +typedef struct +{ + ae_int_t chunksize; + ae_int_t ntotal; + ae_int_t nin; + ae_int_t nout; + ae_int_t wcount; + ae_vector batch4buf; + ae_vector hpcbuf; + ae_matrix xy; + ae_matrix xy2; + ae_vector xyrow; + ae_vector x; + ae_vector y; + ae_vector desiredy; + double e; + ae_vector g; + ae_vector tmp0; +} mlpbuffers; +#endif +#if defined(AE_COMPILE_NTHEORY) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_FTBASE) || !defined(AE_PARTIAL_BUILD) typedef struct { ae_matrix entries; @@ -146,6 +181,11 @@ ae_vector preci; ae_shared_pool bluesteinpool; } fasttransformplan; +#endif +#if defined(AE_COMPILE_NEARUNITYUNIT) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_ALGLIBBASICS) || !defined(AE_PARTIAL_BUILD) +#endif } @@ -167,18 +207,31 @@ ///////////////////////////////////////////////////////////////////////// namespace alglib_impl { -ae_bool seterrorflag(ae_bool* flag, ae_bool cond, ae_state *_state); -ae_bool seterrorflagdiff(ae_bool* flag, +#if defined(AE_COMPILE_SCODES) || !defined(AE_PARTIAL_BUILD) +ae_int_t getrdfserializationcode(ae_state *_state); +ae_int_t getkdtreeserializationcode(ae_state *_state); +ae_int_t getmlpserializationcode(ae_state *_state); +ae_int_t getmlpeserializationcode(ae_state *_state); +ae_int_t getrbfserializationcode(ae_state *_state); +ae_int_t getspline2dserializationcode(ae_state *_state); +ae_int_t getidwserializationcode(ae_state *_state); +ae_int_t getknnserializationcode(ae_state *_state); +#endif +#if defined(AE_COMPILE_APSERV) || !defined(AE_PARTIAL_BUILD) +void seterrorflagdiff(ae_bool* flag, double val, double refval, double tol, double s, ae_state *_state); +ae_bool alwaysfalse(ae_state *_state); void touchint(ae_int_t* a, ae_state *_state); void touchreal(double* a, ae_state 
*_state); double coalesce(double a, double b, ae_state *_state); +ae_int_t coalescei(ae_int_t a, ae_int_t b, ae_state *_state); double inttoreal(ae_int_t a, ae_state *_state); double logbase2(double x, ae_state *_state); +ae_bool approxequal(double a, double b, double tol, ae_state *_state); ae_bool approxequalrel(double a, double b, double tol, ae_state *_state); void taskgenint1d(double a, double b, @@ -208,6 +261,9 @@ ae_int_t n, ae_state *_state); ae_bool aresameboolean(ae_bool v1, ae_bool v2, ae_state *_state); +void setlengthzero(/* Real */ ae_vector* x, + ae_int_t n, + ae_state *_state); void bvectorsetlengthatleast(/* Boolean */ ae_vector* x, ae_int_t n, ae_state *_state); @@ -221,6 +277,33 @@ ae_int_t m, ae_int_t n, ae_state *_state); +void bmatrixsetlengthatleast(/* Boolean */ ae_matrix* x, + ae_int_t m, + ae_int_t n, + ae_state *_state); +void bvectorgrowto(/* Boolean */ ae_vector* x, + ae_int_t n, + ae_state *_state); +void ivectorgrowto(/* Integer */ ae_vector* x, + ae_int_t n, + ae_state *_state); +void rmatrixgrowrowsto(/* Real */ ae_matrix* a, + ae_int_t n, + ae_int_t mincols, + ae_state *_state); +void rmatrixgrowcolsto(/* Real */ ae_matrix* a, + ae_int_t n, + ae_int_t minrows, + ae_state *_state); +void rvectorgrowto(/* Real */ ae_vector* x, + ae_int_t n, + ae_state *_state); +void ivectorresize(/* Integer */ ae_vector* x, + ae_int_t n, + ae_state *_state); +void rvectorresize(/* Real */ ae_vector* x, + ae_int_t n, + ae_state *_state); void rmatrixresize(/* Real */ ae_matrix* x, ae_int_t m, ae_int_t n, @@ -229,6 +312,9 @@ ae_int_t m, ae_int_t n, ae_state *_state); +void ivectorappend(/* Integer */ ae_vector* x, + ae_int_t v, + ae_state *_state); ae_bool isfinitevector(/* Real */ ae_vector* x, ae_int_t n, ae_state *_state); @@ -268,11 +354,55 @@ void randomunit(ae_int_t n, /* Real */ ae_vector* x, ae_state *_state); void swapi(ae_int_t* v0, ae_int_t* v1, ae_state *_state); void swapr(double* v0, double* v1, ae_state *_state); +void swaprows(/* Real */ ae_matrix* a, + ae_int_t i0, + ae_int_t i1, + ae_int_t ncols, + ae_state *_state); +void swapcols(/* Real */ ae_matrix* a, + ae_int_t j0, + ae_int_t j1, + ae_int_t nrows, + ae_state *_state); +void swapentries(/* Real */ ae_vector* a, + ae_int_t i0, + ae_int_t i1, + ae_int_t entrywidth, + ae_state *_state); +void swapelements(/* Real */ ae_vector* a, + ae_int_t i0, + ae_int_t i1, + ae_state *_state); +void swapelementsi(/* Integer */ ae_vector* a, + ae_int_t i0, + ae_int_t i1, + ae_state *_state); double maxreal3(double v0, double v1, double v2, ae_state *_state); void inc(ae_int_t* v, ae_state *_state); void dec(ae_int_t* v, ae_state *_state); +void threadunsafeinc(ae_int_t* v, ae_state *_state); +void threadunsafeincby(ae_int_t* v, ae_int_t k, ae_state *_state); void countdown(ae_int_t* v, ae_state *_state); +double possign(double x, ae_state *_state); +double rmul2(double v0, double v1, ae_state *_state); +double rmul3(double v0, double v1, double v2, ae_state *_state); +ae_int_t idivup(ae_int_t a, ae_int_t b, ae_state *_state); +ae_int_t imin2(ae_int_t i0, ae_int_t i1, ae_state *_state); +ae_int_t imin3(ae_int_t i0, ae_int_t i1, ae_int_t i2, ae_state *_state); +ae_int_t imax2(ae_int_t i0, ae_int_t i1, ae_state *_state); +ae_int_t imax3(ae_int_t i0, ae_int_t i1, ae_int_t i2, ae_state *_state); +double rmax3(double r0, double r1, double r2, ae_state *_state); +double rmaxabs3(double r0, double r1, double r2, ae_state *_state); double boundval(double x, double b1, double b2, ae_state *_state); +ae_int_t iboundval(ae_int_t x, 
ae_int_t b1, ae_int_t b2, ae_state *_state); +double rboundval(double x, double b1, double b2, ae_state *_state); +ae_int_t countnz1(/* Real */ ae_vector* v, + ae_int_t n, + ae_state *_state); +ae_int_t countnz2(/* Real */ ae_matrix* v, + ae_int_t m, + ae_int_t n, + ae_state *_state); void alloccomplex(ae_serializer* s, ae_complex v, ae_state *_state); void serializecomplex(ae_serializer* s, ae_complex v, ae_state *_state); ae_complex unserializecomplex(ae_serializer* s, ae_state *_state); @@ -311,6 +441,9 @@ void unserializerealmatrix(ae_serializer* s, /* Real */ ae_matrix* v, ae_state *_state); +void copybooleanarray(/* Boolean */ ae_vector* src, + /* Boolean */ ae_vector* dst, + ae_state *_state); void copyintegerarray(/* Integer */ ae_vector* src, /* Integer */ ae_vector* dst, ae_state *_state); @@ -320,6 +453,14 @@ void copyrealmatrix(/* Real */ ae_matrix* src, /* Real */ ae_matrix* dst, ae_state *_state); +void unsetintegerarray(/* Integer */ ae_vector* a, ae_state *_state); +void unsetrealarray(/* Real */ ae_vector* a, ae_state *_state); +void unsetrealmatrix(/* Real */ ae_matrix* a, ae_state *_state); +void tiledsplit(ae_int_t tasksize, + ae_int_t tilesize, + ae_int_t* task0, + ae_int_t* task1, + ae_state *_state); ae_int_t recsearch(/* Integer */ ae_vector* a, ae_int_t nrec, ae_int_t nheader, @@ -331,55 +472,89 @@ ae_int_t* task0, ae_int_t* task1, ae_state *_state); +ae_int_t chunkscount(ae_int_t tasksize, + ae_int_t chunksize, + ae_state *_state); +double sparselevel2density(ae_state *_state); +ae_int_t matrixtilesizea(ae_state *_state); +ae_int_t matrixtilesizeb(ae_state *_state); +double smpactivationlevel(ae_state *_state); +double spawnlevel(ae_state *_state); void splitlength(ae_int_t tasksize, ae_int_t chunksize, ae_int_t* task0, ae_int_t* task1, ae_state *_state); -ae_int_t chunkscount(ae_int_t tasksize, - ae_int_t chunksize, +void tracevectorautoprec(/* Real */ ae_vector* a, + ae_int_t i0, + ae_int_t i1, + ae_state *_state); +void tracevectorunscaledunshiftedautoprec(/* Real */ ae_vector* x, + ae_int_t n, + /* Real */ ae_vector* scl, + ae_bool applyscl, + /* Real */ ae_vector* sft, + ae_bool applysft, + ae_state *_state); +void tracerownrm1autoprec(/* Real */ ae_matrix* a, + ae_int_t i0, + ae_int_t i1, + ae_int_t j0, + ae_int_t j1, + ae_state *_state); +void tracevectore6(/* Real */ ae_vector* a, + ae_int_t i0, + ae_int_t i1, + ae_state *_state); +void tracevectore615(/* Real */ ae_vector* a, + ae_int_t i0, + ae_int_t i1, + ae_bool usee15, + ae_state *_state); +void tracerownrm1e6(/* Real */ ae_matrix* a, + ae_int_t i0, + ae_int_t i1, + ae_int_t j0, + ae_int_t j1, ae_state *_state); -void _apbuffers_init(void* _p, ae_state *_state); -void _apbuffers_init_copy(void* _dst, void* _src, ae_state *_state); +void _apbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _apbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _apbuffers_clear(void* _p); void _apbuffers_destroy(void* _p); -void _sboolean_init(void* _p, ae_state *_state); -void _sboolean_init_copy(void* _dst, void* _src, ae_state *_state); +void _sboolean_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _sboolean_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _sboolean_clear(void* _p); void _sboolean_destroy(void* _p); -void _sbooleanarray_init(void* _p, ae_state *_state); -void _sbooleanarray_init_copy(void* _dst, void* _src, ae_state *_state); +void _sbooleanarray_init(void* _p, ae_state *_state, ae_bool 
make_automatic); +void _sbooleanarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _sbooleanarray_clear(void* _p); void _sbooleanarray_destroy(void* _p); -void _sinteger_init(void* _p, ae_state *_state); -void _sinteger_init_copy(void* _dst, void* _src, ae_state *_state); +void _sinteger_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _sinteger_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _sinteger_clear(void* _p); void _sinteger_destroy(void* _p); -void _sintegerarray_init(void* _p, ae_state *_state); -void _sintegerarray_init_copy(void* _dst, void* _src, ae_state *_state); +void _sintegerarray_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _sintegerarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _sintegerarray_clear(void* _p); void _sintegerarray_destroy(void* _p); -void _sreal_init(void* _p, ae_state *_state); -void _sreal_init_copy(void* _dst, void* _src, ae_state *_state); +void _sreal_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _sreal_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _sreal_clear(void* _p); void _sreal_destroy(void* _p); -void _srealarray_init(void* _p, ae_state *_state); -void _srealarray_init_copy(void* _dst, void* _src, ae_state *_state); +void _srealarray_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _srealarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _srealarray_clear(void* _p); void _srealarray_destroy(void* _p); -void _scomplex_init(void* _p, ae_state *_state); -void _scomplex_init_copy(void* _dst, void* _src, ae_state *_state); +void _scomplex_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _scomplex_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _scomplex_clear(void* _p); void _scomplex_destroy(void* _p); -void _scomplexarray_init(void* _p, ae_state *_state); -void _scomplexarray_init_copy(void* _dst, void* _src, ae_state *_state); +void _scomplexarray_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _scomplexarray_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _scomplexarray_clear(void* _p); void _scomplexarray_destroy(void* _p); -ae_int_t getrdfserializationcode(ae_state *_state); -ae_int_t getkdtreeserializationcode(ae_state *_state); -ae_int_t getmlpserializationcode(ae_state *_state); -ae_int_t getmlpeserializationcode(ae_state *_state); -ae_int_t getrbfserializationcode(ae_state *_state); +#endif +#if defined(AE_COMPILE_TSORT) || !defined(AE_PARTIAL_BUILD) void tagsort(/* Real */ ae_vector* a, ae_int_t n, /* Integer */ ae_vector* p1, @@ -412,6 +587,10 @@ ae_int_t offset, ae_int_t n, ae_state *_state); +void sortmiddlei(/* Integer */ ae_vector* a, + ae_int_t offset, + ae_int_t n, + ae_state *_state); void tagheappushi(/* Real */ ae_vector* a, /* Integer */ ae_vector* b, ae_int_t* n, @@ -436,12 +615,20 @@ ae_int_t n, double t, ae_state *_state); -void rankx(/* Real */ ae_vector* x, +#endif +#if defined(AE_COMPILE_ABLASMKL) || !defined(AE_PARTIAL_BUILD) +ae_bool rmatrixgermkl(ae_int_t m, ae_int_t n, - ae_bool iscentered, - apbuffers* buf, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + double alpha, + /* Real */ ae_vector* u, + ae_int_t iu, + /* Real */ ae_vector* v, + ae_int_t iv, ae_state *_state); -ae_bool cmatrixrank1f(ae_int_t m, +ae_bool cmatrixrank1mkl(ae_int_t m, ae_int_t n, /* 
Complex */ ae_matrix* a, ae_int_t ia, @@ -451,7 +638,7 @@ /* Complex */ ae_vector* v, ae_int_t iv, ae_state *_state); -ae_bool rmatrixrank1f(ae_int_t m, +ae_bool rmatrixrank1mkl(ae_int_t m, ae_int_t n, /* Real */ ae_matrix* a, ae_int_t ia, @@ -461,7 +648,7 @@ /* Real */ ae_vector* v, ae_int_t iv, ae_state *_state); -ae_bool cmatrixmvf(ae_int_t m, +ae_bool cmatrixmvmkl(ae_int_t m, ae_int_t n, /* Complex */ ae_matrix* a, ae_int_t ia, @@ -472,7 +659,7 @@ /* Complex */ ae_vector* y, ae_int_t iy, ae_state *_state); -ae_bool rmatrixmvf(ae_int_t m, +ae_bool rmatrixmvmkl(ae_int_t m, ae_int_t n, /* Real */ ae_matrix* a, ae_int_t ia, @@ -483,81 +670,56 @@ /* Real */ ae_vector* y, ae_int_t iy, ae_state *_state); -ae_bool cmatrixrighttrsmf(ae_int_t m, - ae_int_t n, - /* Complex */ ae_matrix* a, - ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Complex */ ae_matrix* x, - ae_int_t i2, - ae_int_t j2, - ae_state *_state); -ae_bool cmatrixlefttrsmf(ae_int_t m, - ae_int_t n, - /* Complex */ ae_matrix* a, - ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Complex */ ae_matrix* x, - ae_int_t i2, - ae_int_t j2, - ae_state *_state); -ae_bool rmatrixrighttrsmf(ae_int_t m, +ae_bool rmatrixgemvmkl(ae_int_t m, ae_int_t n, + double alpha, /* Real */ ae_matrix* a, - ae_int_t i1, - ae_int_t j1, - ae_bool isupper, - ae_bool isunit, - ae_int_t optype, - /* Real */ ae_matrix* x, - ae_int_t i2, - ae_int_t j2, + ae_int_t ia, + ae_int_t ja, + ae_int_t opa, + /* Real */ ae_vector* x, + ae_int_t ix, + double beta, + /* Real */ ae_vector* y, + ae_int_t iy, ae_state *_state); -ae_bool rmatrixlefttrsmf(ae_int_t m, - ae_int_t n, +ae_bool rmatrixtrsvmkl(ae_int_t n, /* Real */ ae_matrix* a, - ae_int_t i1, - ae_int_t j1, + ae_int_t ia, + ae_int_t ja, ae_bool isupper, ae_bool isunit, ae_int_t optype, - /* Real */ ae_matrix* x, - ae_int_t i2, - ae_int_t j2, + /* Real */ ae_vector* x, + ae_int_t ix, ae_state *_state); -ae_bool cmatrixherkf(ae_int_t n, +ae_bool rmatrixsyrkmkl(ae_int_t n, ae_int_t k, double alpha, - /* Complex */ ae_matrix* a, + /* Real */ ae_matrix* a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, double beta, - /* Complex */ ae_matrix* c, + /* Real */ ae_matrix* c, ae_int_t ic, ae_int_t jc, ae_bool isupper, ae_state *_state); -ae_bool rmatrixsyrkf(ae_int_t n, +ae_bool cmatrixherkmkl(ae_int_t n, ae_int_t k, double alpha, - /* Real */ ae_matrix* a, + /* Complex */ ae_matrix* a, ae_int_t ia, ae_int_t ja, ae_int_t optypea, double beta, - /* Real */ ae_matrix* c, + /* Complex */ ae_matrix* c, ae_int_t ic, ae_int_t jc, ae_bool isupper, ae_state *_state); -ae_bool rmatrixgemmf(ae_int_t m, +ae_bool rmatrixgemmmkl(ae_int_t m, ae_int_t n, ae_int_t k, double alpha, @@ -574,24 +736,19 @@ ae_int_t ic, ae_int_t jc, ae_state *_state); -ae_bool cmatrixgemmf(ae_int_t m, - ae_int_t n, - ae_int_t k, - ae_complex alpha, - /* Complex */ ae_matrix* a, +ae_bool rmatrixsymvmkl(ae_int_t n, + double alpha, + /* Real */ ae_matrix* a, ae_int_t ia, ae_int_t ja, - ae_int_t optypea, - /* Complex */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - ae_complex beta, - /* Complex */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, + ae_bool isupper, + /* Real */ ae_vector* x, + ae_int_t ix, + double beta, + /* Real */ ae_vector* y, + ae_int_t iy, ae_state *_state); -void cmatrixgemmk(ae_int_t m, +ae_bool cmatrixgemmmkl(ae_int_t m, ae_int_t n, ae_int_t k, ae_complex alpha, @@ -608,144 +765,7 @@ ae_int_t ic, ae_int_t jc, ae_state *_state); -void 
rmatrixgemmk(ae_int_t m, - ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_state *_state); -void rmatrixgemmk44v00(ae_int_t m, - ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_state *_state); -void rmatrixgemmk44v01(ae_int_t m, - ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_state *_state); -void rmatrixgemmk44v10(ae_int_t m, - ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_state *_state); -void rmatrixgemmk44v11(ae_int_t m, - ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_state *_state); -ae_bool rmatrixsyrkmkl(ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_bool isupper, - ae_state *_state); -ae_bool cmatrixherkmkl(ae_int_t n, - ae_int_t k, - double alpha, - /* Complex */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - double beta, - /* Complex */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_bool isupper, - ae_state *_state); -ae_bool rmatrixgemmmkl(ae_int_t m, - ae_int_t n, - ae_int_t k, - double alpha, - /* Real */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - /* Real */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - double beta, - /* Real */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_state *_state); -ae_bool cmatrixgemmmkl(ae_int_t m, - ae_int_t n, - ae_int_t k, - ae_complex alpha, - /* Complex */ ae_matrix* a, - ae_int_t ia, - ae_int_t ja, - ae_int_t optypea, - /* Complex */ ae_matrix* b, - ae_int_t ib, - ae_int_t jb, - ae_int_t optypeb, - ae_complex beta, - /* Complex */ ae_matrix* c, - ae_int_t ic, - ae_int_t jc, - ae_state *_state); -ae_bool cmatrixlefttrsmmkl(ae_int_t m, +ae_bool cmatrixlefttrsmmkl(ae_int_t m, ae_int_t n, /* Complex */ ae_matrix* a, ae_int_t i1, @@ -890,13 +910,393 @@ ae_int_t* m, ae_int_t* info, ae_state *_state); -ae_bool smatrixtdevdmkl(/* Real */ ae_vector* d, - /* Real */ ae_vector* e, - ae_int_t n, - ae_int_t zneeded, - /* Real */ ae_matrix* z, - ae_bool* evdresult, +ae_bool smatrixtdevdmkl(/* Real */ ae_vector* d, + /* Real */ ae_vector* e, + ae_int_t n, + ae_int_t zneeded, + /* Real */ ae_matrix* z, + ae_bool* evdresult, + ae_state *_state); +ae_bool sparsegemvcrsmkl(ae_int_t opa, + ae_int_t arows, + ae_int_t acols, + double alpha, + /* Real */ ae_vector* vals, + /* Integer */ ae_vector* cidx, + /* Integer */ ae_vector* ridx, + /* Real */ ae_vector* x, + ae_int_t ix, + double beta, + /* Real */ ae_vector* y, + ae_int_t iy, + ae_state *_state); +#endif +#if defined(AE_COMPILE_ABLASF) || 
!defined(AE_PARTIAL_BUILD) +ae_bool rmatrixgerf(ae_int_t m, + ae_int_t n, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + double ralpha, + /* Real */ ae_vector* u, + ae_int_t iu, + /* Real */ ae_vector* v, + ae_int_t iv, + ae_state *_state); +ae_bool cmatrixrank1f(ae_int_t m, + ae_int_t n, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Complex */ ae_vector* u, + ae_int_t iu, + /* Complex */ ae_vector* v, + ae_int_t iv, + ae_state *_state); +ae_bool rmatrixrank1f(ae_int_t m, + ae_int_t n, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_vector* u, + ae_int_t iu, + /* Real */ ae_vector* v, + ae_int_t iv, + ae_state *_state); +ae_bool cmatrixrighttrsmf(ae_int_t m, + ae_int_t n, + /* Complex */ ae_matrix* a, + ae_int_t i1, + ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Complex */ ae_matrix* x, + ae_int_t i2, + ae_int_t j2, + ae_state *_state); +ae_bool cmatrixlefttrsmf(ae_int_t m, + ae_int_t n, + /* Complex */ ae_matrix* a, + ae_int_t i1, + ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Complex */ ae_matrix* x, + ae_int_t i2, + ae_int_t j2, + ae_state *_state); +ae_bool rmatrixrighttrsmf(ae_int_t m, + ae_int_t n, + /* Real */ ae_matrix* a, + ae_int_t i1, + ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Real */ ae_matrix* x, + ae_int_t i2, + ae_int_t j2, + ae_state *_state); +ae_bool rmatrixlefttrsmf(ae_int_t m, + ae_int_t n, + /* Real */ ae_matrix* a, + ae_int_t i1, + ae_int_t j1, + ae_bool isupper, + ae_bool isunit, + ae_int_t optype, + /* Real */ ae_matrix* x, + ae_int_t i2, + ae_int_t j2, + ae_state *_state); +ae_bool cmatrixherkf(ae_int_t n, + ae_int_t k, + double alpha, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + double beta, + /* Complex */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_bool isupper, + ae_state *_state); +ae_bool rmatrixsyrkf(ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_bool isupper, + ae_state *_state); +ae_bool rmatrixgemmf(ae_int_t m, + ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + ae_int_t optypeb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state); +ae_bool cmatrixgemmf(ae_int_t m, + ae_int_t n, + ae_int_t k, + ae_complex alpha, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + /* Complex */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + ae_int_t optypeb, + ae_complex beta, + /* Complex */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state); +void cmatrixgemmk(ae_int_t m, + ae_int_t n, + ae_int_t k, + ae_complex alpha, + /* Complex */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + /* Complex */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + ae_int_t optypeb, + ae_complex beta, + /* Complex */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state); +void rmatrixgemmk(ae_int_t m, + ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + ae_int_t optypea, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + ae_int_t optypeb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state); +void rmatrixgemmk44v00(ae_int_t m, + 
ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state); +void rmatrixgemmk44v01(ae_int_t m, + ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state); +void rmatrixgemmk44v10(ae_int_t m, + ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state); +void rmatrixgemmk44v11(ae_int_t m, + ae_int_t n, + ae_int_t k, + double alpha, + /* Real */ ae_matrix* a, + ae_int_t ia, + ae_int_t ja, + /* Real */ ae_matrix* b, + ae_int_t ib, + ae_int_t jb, + double beta, + /* Real */ ae_matrix* c, + ae_int_t ic, + ae_int_t jc, + ae_state *_state); +#endif +#if defined(AE_COMPILE_CREFLECTIONS) || !defined(AE_PARTIAL_BUILD) +void complexgeneratereflection(/* Complex */ ae_vector* x, + ae_int_t n, + ae_complex* tau, + ae_state *_state); +void complexapplyreflectionfromtheleft(/* Complex */ ae_matrix* c, + ae_complex tau, + /* Complex */ ae_vector* v, + ae_int_t m1, + ae_int_t m2, + ae_int_t n1, + ae_int_t n2, + /* Complex */ ae_vector* work, + ae_state *_state); +void complexapplyreflectionfromtheright(/* Complex */ ae_matrix* c, + ae_complex tau, + /* Complex */ ae_vector* v, + ae_int_t m1, + ae_int_t m2, + ae_int_t n1, + ae_int_t n2, + /* Complex */ ae_vector* work, + ae_state *_state); +#endif +#if defined(AE_COMPILE_ROTATIONS) || !defined(AE_PARTIAL_BUILD) +void applyrotationsfromtheleft(ae_bool isforward, + ae_int_t m1, + ae_int_t m2, + ae_int_t n1, + ae_int_t n2, + /* Real */ ae_vector* c, + /* Real */ ae_vector* s, + /* Real */ ae_matrix* a, + /* Real */ ae_vector* work, + ae_state *_state); +void applyrotationsfromtheright(ae_bool isforward, + ae_int_t m1, + ae_int_t m2, + ae_int_t n1, + ae_int_t n2, + /* Real */ ae_vector* c, + /* Real */ ae_vector* s, + /* Real */ ae_matrix* a, + /* Real */ ae_vector* work, + ae_state *_state); +void generaterotation(double f, + double g, + double* cs, + double* sn, + double* r, + ae_state *_state); +#endif +#if defined(AE_COMPILE_TRLINSOLVE) || !defined(AE_PARTIAL_BUILD) +void rmatrixtrsafesolve(/* Real */ ae_matrix* a, + ae_int_t n, + /* Real */ ae_vector* x, + double* s, + ae_bool isupper, + ae_bool istrans, + ae_bool isunit, + ae_state *_state); +void safesolvetriangular(/* Real */ ae_matrix* a, + ae_int_t n, + /* Real */ ae_vector* x, + double* s, + ae_bool isupper, + ae_bool istrans, + ae_bool isunit, + ae_bool normin, + /* Real */ ae_vector* cnorm, + ae_state *_state); +#endif +#if defined(AE_COMPILE_SAFESOLVE) || !defined(AE_PARTIAL_BUILD) +ae_bool rmatrixscaledtrsafesolve(/* Real */ ae_matrix* a, + double sa, + ae_int_t n, + /* Real */ ae_vector* x, + ae_bool isupper, + ae_int_t trans, + ae_bool isunit, + double maxgrowth, + ae_state *_state); +ae_bool cmatrixscaledtrsafesolve(/* Complex */ ae_matrix* a, + double sa, + ae_int_t n, + /* Complex */ ae_vector* x, + ae_bool isupper, + ae_int_t trans, + ae_bool isunit, + double maxgrowth, + ae_state *_state); +#endif +#if defined(AE_COMPILE_HBLAS) || !defined(AE_PARTIAL_BUILD) +void hermitianmatrixvectormultiply(/* Complex */ ae_matrix* a, + ae_bool 
isupper, + ae_int_t i1, + ae_int_t i2, + /* Complex */ ae_vector* x, + ae_complex alpha, + /* Complex */ ae_vector* y, + ae_state *_state); +void hermitianrank2update(/* Complex */ ae_matrix* a, + ae_bool isupper, + ae_int_t i1, + ae_int_t i2, + /* Complex */ ae_vector* x, + /* Complex */ ae_vector* y, + /* Complex */ ae_vector* t, + ae_complex alpha, + ae_state *_state); +#endif +#if defined(AE_COMPILE_SBLAS) || !defined(AE_PARTIAL_BUILD) +void symmetricmatrixvectormultiply(/* Real */ ae_matrix* a, + ae_bool isupper, + ae_int_t i1, + ae_int_t i2, + /* Real */ ae_vector* x, + double alpha, + /* Real */ ae_vector* y, + ae_state *_state); +void symmetricrank2update(/* Real */ ae_matrix* a, + ae_bool isupper, + ae_int_t i1, + ae_int_t i2, + /* Real */ ae_vector* x, + /* Real */ ae_vector* y, + /* Real */ ae_vector* t, + double alpha, ae_state *_state); +#endif +#if defined(AE_COMPILE_BLAS) || !defined(AE_PARTIAL_BUILD) double vectornorm2(/* Real */ ae_vector* x, ae_int_t i1, ae_int_t i2, @@ -988,168 +1388,78 @@ double beta, /* Real */ ae_vector* work, ae_state *_state); -void hermitianmatrixvectormultiply(/* Complex */ ae_matrix* a, - ae_bool isupper, - ae_int_t i1, - ae_int_t i2, - /* Complex */ ae_vector* x, - ae_complex alpha, - /* Complex */ ae_vector* y, - ae_state *_state); -void hermitianrank2update(/* Complex */ ae_matrix* a, - ae_bool isupper, - ae_int_t i1, - ae_int_t i2, - /* Complex */ ae_vector* x, - /* Complex */ ae_vector* y, - /* Complex */ ae_vector* t, - ae_complex alpha, - ae_state *_state); -void generatereflection(/* Real */ ae_vector* x, - ae_int_t n, - double* tau, - ae_state *_state); -void applyreflectionfromtheleft(/* Real */ ae_matrix* c, - double tau, - /* Real */ ae_vector* v, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Real */ ae_vector* work, - ae_state *_state); -void applyreflectionfromtheright(/* Real */ ae_matrix* c, - double tau, - /* Real */ ae_vector* v, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Real */ ae_vector* work, - ae_state *_state); -void complexgeneratereflection(/* Complex */ ae_vector* x, +#endif +#if defined(AE_COMPILE_LINMIN) || !defined(AE_PARTIAL_BUILD) +void linminnormalized(/* Real */ ae_vector* d, + double* stp, ae_int_t n, - ae_complex* tau, - ae_state *_state); -void complexapplyreflectionfromtheleft(/* Complex */ ae_matrix* c, - ae_complex tau, - /* Complex */ ae_vector* v, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Complex */ ae_vector* work, - ae_state *_state); -void complexapplyreflectionfromtheright(/* Complex */ ae_matrix* c, - ae_complex tau, - /* Complex */ ae_vector* v, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Complex */ ae_vector* work, - ae_state *_state); -void symmetricmatrixvectormultiply(/* Real */ ae_matrix* a, - ae_bool isupper, - ae_int_t i1, - ae_int_t i2, - /* Real */ ae_vector* x, - double alpha, - /* Real */ ae_vector* y, ae_state *_state); -void symmetricrank2update(/* Real */ ae_matrix* a, - ae_bool isupper, - ae_int_t i1, - ae_int_t i2, +void mcsrch(ae_int_t n, /* Real */ ae_vector* x, - /* Real */ ae_vector* y, - /* Real */ ae_vector* t, - double alpha, - ae_state *_state); -void applyrotationsfromtheleft(ae_bool isforward, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t n2, - /* Real */ ae_vector* c, - /* Real */ ae_vector* s, - /* Real */ ae_matrix* a, - /* Real */ ae_vector* work, - ae_state *_state); -void applyrotationsfromtheright(ae_bool isforward, - ae_int_t m1, - ae_int_t m2, - ae_int_t n1, - ae_int_t 
n2, - /* Real */ ae_vector* c, + double* f, + /* Real */ ae_vector* g, /* Real */ ae_vector* s, - /* Real */ ae_matrix* a, - /* Real */ ae_vector* work, - ae_state *_state); -void generaterotation(double f, - double g, - double* cs, - double* sn, - double* r, - ae_state *_state); -void rmatrixinternalschurdecomposition(/* Real */ ae_matrix* h, - ae_int_t n, - ae_int_t tneeded, - ae_int_t zneeded, - /* Real */ ae_vector* wr, - /* Real */ ae_vector* wi, - /* Real */ ae_matrix* z, + double* stp, + double stpmax, + double gtol, ae_int_t* info, + ae_int_t* nfev, + /* Real */ ae_vector* wa, + linminstate* state, + ae_int_t* stage, ae_state *_state); -ae_bool upperhessenbergschurdecomposition(/* Real */ ae_matrix* h, - ae_int_t n, - /* Real */ ae_matrix* s, +void armijocreate(ae_int_t n, + /* Real */ ae_vector* x, + double f, + /* Real */ ae_vector* s, + double stp, + double stpmax, + ae_int_t fmax, + armijostate* state, ae_state *_state); -void internalschurdecomposition(/* Real */ ae_matrix* h, - ae_int_t n, - ae_int_t tneeded, - ae_int_t zneeded, - /* Real */ ae_vector* wr, - /* Real */ ae_vector* wi, - /* Real */ ae_matrix* z, +ae_bool armijoiteration(armijostate* state, ae_state *_state); +void armijoresults(armijostate* state, ae_int_t* info, + double* stp, + double* f, ae_state *_state); -void rmatrixtrsafesolve(/* Real */ ae_matrix* a, +void _linminstate_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _linminstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); +void _linminstate_clear(void* _p); +void _linminstate_destroy(void* _p); +void _armijostate_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _armijostate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); +void _armijostate_clear(void* _p); +void _armijostate_destroy(void* _p); +#endif +#if defined(AE_COMPILE_XBLAS) || !defined(AE_PARTIAL_BUILD) +void xdot(/* Real */ ae_vector* a, + /* Real */ ae_vector* b, ae_int_t n, - /* Real */ ae_vector* x, - double* s, - ae_bool isupper, - ae_bool istrans, - ae_bool isunit, + /* Real */ ae_vector* temp, + double* r, + double* rerr, ae_state *_state); -void safesolvetriangular(/* Real */ ae_matrix* a, +void xcdot(/* Complex */ ae_vector* a, + /* Complex */ ae_vector* b, ae_int_t n, - /* Real */ ae_vector* x, - double* s, - ae_bool isupper, - ae_bool istrans, - ae_bool isunit, - ae_bool normin, - /* Real */ ae_vector* cnorm, + /* Real */ ae_vector* temp, + ae_complex* r, + double* rerr, ae_state *_state); -ae_bool rmatrixscaledtrsafesolve(/* Real */ ae_matrix* a, - double sa, +#endif +#if defined(AE_COMPILE_BASICSTATOPS) || !defined(AE_PARTIAL_BUILD) +void rankx(/* Real */ ae_vector* x, ae_int_t n, - /* Real */ ae_vector* x, - ae_bool isupper, - ae_int_t trans, - ae_bool isunit, - double maxgrowth, + ae_bool iscentered, + apbuffers* buf, ae_state *_state); -ae_bool cmatrixscaledtrsafesolve(/* Complex */ ae_matrix* a, - double sa, +void rankxuntied(/* Real */ ae_vector* x, ae_int_t n, - /* Complex */ ae_vector* x, - ae_bool isupper, - ae_int_t trans, - ae_bool isunit, - double maxgrowth, + apbuffers* buf, ae_state *_state); +#endif +#if defined(AE_COMPILE_HPCCORES) || !defined(AE_PARTIAL_BUILD) void hpcpreparechunkedgradient(/* Real */ ae_vector* weights, ae_int_t wcount, ae_int_t ntotal, @@ -1182,69 +1492,18 @@ /* Real */ ae_vector* batch4buf, /* Real */ ae_vector* hpcbuf, ae_state *_state); -void _mlpbuffers_init(void* _p, ae_state *_state); -void _mlpbuffers_init_copy(void* _dst, void* _src, ae_state 
*_state); +void _mlpbuffers_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _mlpbuffers_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _mlpbuffers_clear(void* _p); void _mlpbuffers_destroy(void* _p); -void xdot(/* Real */ ae_vector* a, - /* Real */ ae_vector* b, - ae_int_t n, - /* Real */ ae_vector* temp, - double* r, - double* rerr, - ae_state *_state); -void xcdot(/* Complex */ ae_vector* a, - /* Complex */ ae_vector* b, - ae_int_t n, - /* Real */ ae_vector* temp, - ae_complex* r, - double* rerr, - ae_state *_state); -void linminnormalized(/* Real */ ae_vector* d, - double* stp, - ae_int_t n, - ae_state *_state); -void mcsrch(ae_int_t n, - /* Real */ ae_vector* x, - double* f, - /* Real */ ae_vector* g, - /* Real */ ae_vector* s, - double* stp, - double stpmax, - double gtol, - ae_int_t* info, - ae_int_t* nfev, - /* Real */ ae_vector* wa, - linminstate* state, - ae_int_t* stage, - ae_state *_state); -void armijocreate(ae_int_t n, - /* Real */ ae_vector* x, - double f, - /* Real */ ae_vector* s, - double stp, - double stpmax, - ae_int_t fmax, - armijostate* state, - ae_state *_state); -ae_bool armijoiteration(armijostate* state, ae_state *_state); -void armijoresults(armijostate* state, - ae_int_t* info, - double* stp, - double* f, - ae_state *_state); -void _linminstate_init(void* _p, ae_state *_state); -void _linminstate_init_copy(void* _dst, void* _src, ae_state *_state); -void _linminstate_clear(void* _p); -void _linminstate_destroy(void* _p); -void _armijostate_init(void* _p, ae_state *_state); -void _armijostate_init_copy(void* _dst, void* _src, ae_state *_state); -void _armijostate_clear(void* _p); -void _armijostate_destroy(void* _p); +#endif +#if defined(AE_COMPILE_NTHEORY) || !defined(AE_PARTIAL_BUILD) void findprimitiverootandinverse(ae_int_t n, ae_int_t* proot, ae_int_t* invproot, ae_state *_state); +#endif +#if defined(AE_COMPILE_FTBASE) || !defined(AE_PARTIAL_BUILD) void ftcomplexfftplan(ae_int_t n, ae_int_t k, fasttransformplan* plan, @@ -1263,13 +1522,18 @@ ae_int_t ftbasefindsmooth(ae_int_t n, ae_state *_state); ae_int_t ftbasefindsmootheven(ae_int_t n, ae_state *_state); double ftbasegetflopestimate(ae_int_t n, ae_state *_state); -void _fasttransformplan_init(void* _p, ae_state *_state); -void _fasttransformplan_init_copy(void* _dst, void* _src, ae_state *_state); +void _fasttransformplan_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _fasttransformplan_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _fasttransformplan_clear(void* _p); void _fasttransformplan_destroy(void* _p); +#endif +#if defined(AE_COMPILE_NEARUNITYUNIT) || !defined(AE_PARTIAL_BUILD) double nulog1p(double x, ae_state *_state); double nuexpm1(double x, ae_state *_state); double nucosm1(double x, ae_state *_state); +#endif +#if defined(AE_COMPILE_ALGLIBBASICS) || !defined(AE_PARTIAL_BUILD) +#endif } #endif diff -Nru alglib-3.10.0/src/alglibmisc.cpp alglib-3.16.0/src/alglibmisc.cpp --- alglib-3.10.0/src/alglibmisc.cpp 2015-08-19 12:24:22.000000000 +0000 +++ alglib-3.16.0/src/alglibmisc.cpp 2019-12-19 10:28:27.000000000 +0000 @@ -1,5 +1,5 @@ /************************************************************************* -ALGLIB 3.10.0 (source code generated 2015-08-19) +ALGLIB 3.16.0 (source code generated 2019-12-19) Copyright (c) Sergey Bochkanov (ALGLIB project). 
>>> SOURCE LICENSE >>> @@ -17,17 +17,20 @@ http://www.fsf.org/licensing/licenses >>> END OF LICENSE >>> *************************************************************************/ +#ifdef _MSC_VER +#define _CRT_SECURE_NO_WARNINGS +#endif #include "stdafx.h" #include "alglibmisc.h" // disable some irrelevant warnings -#if (AE_COMPILER==AE_MSVC) +#if (AE_COMPILER==AE_MSVC) && !defined(AE_ALL_WARNINGS) #pragma warning(disable:4100) #pragma warning(disable:4127) +#pragma warning(disable:4611) #pragma warning(disable:4702) #pragma warning(disable:4996) #endif -using namespace std; ///////////////////////////////////////////////////////////////////////// // @@ -37,529 +40,479 @@ namespace alglib { +#if defined(AE_COMPILE_NEARESTNEIGHBOR) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_HQRND) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_XDEBUG) || !defined(AE_PARTIAL_BUILD) + +#endif +#if defined(AE_COMPILE_NEARESTNEIGHBOR) || !defined(AE_PARTIAL_BUILD) /************************************************************************* -Portable high quality random number generator state. -Initialized with HQRNDRandomize() or HQRNDSeed(). +Buffer object which is used to perform nearest neighbor requests in the +multithreaded mode (multiple threads working with same KD-tree object). -Fields: - S1, S2 - seed values - V - precomputed value - MagicV - 'magic' value used to determine whether State structure - was correctly initialized. +This object should be created with KDTreeCreateRequestBuffer(). *************************************************************************/ -_hqrndstate_owner::_hqrndstate_owner() +_kdtreerequestbuffer_owner::_kdtreerequestbuffer_owner() { - p_struct = (alglib_impl::hqrndstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::hqrndstate), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_hqrndstate_init(p_struct, NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_kdtreerequestbuffer_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::kdtreerequestbuffer*)alglib_impl::ae_malloc(sizeof(alglib_impl::kdtreerequestbuffer), &_state); + memset(p_struct, 0, sizeof(alglib_impl::kdtreerequestbuffer)); + alglib_impl::_kdtreerequestbuffer_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } -_hqrndstate_owner::_hqrndstate_owner(const _hqrndstate_owner &rhs) +_kdtreerequestbuffer_owner::_kdtreerequestbuffer_owner(const _kdtreerequestbuffer_owner &rhs) { - p_struct = (alglib_impl::hqrndstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::hqrndstate), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_hqrndstate_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_kdtreerequestbuffer_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + 
alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: kdtreerequestbuffer copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::kdtreerequestbuffer*)alglib_impl::ae_malloc(sizeof(alglib_impl::kdtreerequestbuffer), &_state); + memset(p_struct, 0, sizeof(alglib_impl::kdtreerequestbuffer)); + alglib_impl::_kdtreerequestbuffer_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } -_hqrndstate_owner& _hqrndstate_owner::operator=(const _hqrndstate_owner &rhs) +_kdtreerequestbuffer_owner& _kdtreerequestbuffer_owner::operator=(const _kdtreerequestbuffer_owner &rhs) { if( this==&rhs ) return *this; - alglib_impl::_hqrndstate_clear(p_struct); - alglib_impl::_hqrndstate_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: kdtreerequestbuffer assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: kdtreerequestbuffer assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_kdtreerequestbuffer_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::kdtreerequestbuffer)); + alglib_impl::_kdtreerequestbuffer_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); return *this; } -_hqrndstate_owner::~_hqrndstate_owner() +_kdtreerequestbuffer_owner::~_kdtreerequestbuffer_owner() { - alglib_impl::_hqrndstate_clear(p_struct); - ae_free(p_struct); + if( p_struct!=NULL ) + { + alglib_impl::_kdtreerequestbuffer_destroy(p_struct); + ae_free(p_struct); + } } -alglib_impl::hqrndstate* _hqrndstate_owner::c_ptr() +alglib_impl::kdtreerequestbuffer* _kdtreerequestbuffer_owner::c_ptr() { return p_struct; } -alglib_impl::hqrndstate* _hqrndstate_owner::c_ptr() const +alglib_impl::kdtreerequestbuffer* _kdtreerequestbuffer_owner::c_ptr() const { - return const_cast(p_struct); + return const_cast(p_struct); } -hqrndstate::hqrndstate() : _hqrndstate_owner() +kdtreerequestbuffer::kdtreerequestbuffer() : _kdtreerequestbuffer_owner() { } -hqrndstate::hqrndstate(const hqrndstate &rhs):_hqrndstate_owner(rhs) +kdtreerequestbuffer::kdtreerequestbuffer(const kdtreerequestbuffer &rhs):_kdtreerequestbuffer_owner(rhs) { } -hqrndstate& hqrndstate::operator=(const hqrndstate &rhs) +kdtreerequestbuffer& kdtreerequestbuffer::operator=(const kdtreerequestbuffer &rhs) { if( this==&rhs ) return *this; - _hqrndstate_owner::operator=(rhs); + _kdtreerequestbuffer_owner::operator=(rhs); return *this; } -hqrndstate::~hqrndstate() +kdtreerequestbuffer::~kdtreerequestbuffer() { } -/************************************************************************* -HQRNDState initialization with random values which come from standard -RNG. - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey +/************************************************************************* +KD-tree object. 
*************************************************************************/ -void hqrndrandomize(hqrndstate &state) +_kdtree_owner::_kdtree_owner() { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::hqrndrandomize(const_cast(state.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); + if( p_struct!=NULL ) + { + alglib_impl::_kdtree_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::kdtree*)alglib_impl::ae_malloc(sizeof(alglib_impl::kdtree), &_state); + memset(p_struct, 0, sizeof(alglib_impl::kdtree)); + alglib_impl::_kdtree_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } -/************************************************************************* -HQRNDState initialization with seed values - - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -void hqrndseed(const ae_int_t s1, const ae_int_t s2, hqrndstate &state) +_kdtree_owner::_kdtree_owner(const _kdtree_owner &rhs) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::hqrndseed(s1, s2, const_cast(state.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); + if( p_struct!=NULL ) + { + alglib_impl::_kdtree_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: kdtree copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::kdtree*)alglib_impl::ae_malloc(sizeof(alglib_impl::kdtree), &_state); + memset(p_struct, 0, sizeof(alglib_impl::kdtree)); + alglib_impl::_kdtree_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } -/************************************************************************* -This function generates random real number in (0,1), -not including interval boundaries - -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). 
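For reference, the HQRND wrapper functions documented in these hunks are driven from user code roughly as follows; a minimal sketch, with illustrative seed values and scaffolding:

    #include "alglibmisc.h"
    using namespace alglib;

    int main()
    {
        hqrndstate state;
        hqrndseed(11, 42, state);               // fixed seeds give a reproducible stream
        double   u = hqrnduniformr(state);      // real in (0,1), boundaries excluded
        ae_int_t k = hqrnduniformi(state, 10);  // integer in [0,10)
        double   g = hqrndnormal(state);        // one N(0,1) deviate
        (void)u; (void)k; (void)g;
        return 0;
    }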
- - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -double hqrnduniformr(const hqrndstate &state) +_kdtree_owner& _kdtree_owner::operator=(const _kdtree_owner &rhs) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - double result = alglib_impl::hqrnduniformr(const_cast(state.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: kdtree assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: kdtree assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_kdtree_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::kdtree)); + alglib_impl::_kdtree_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + +_kdtree_owner::~_kdtree_owner() +{ + if( p_struct!=NULL ) { - throw ap_error(_alglib_env_state.error_msg); + alglib_impl::_kdtree_destroy(p_struct); + ae_free(p_struct); } } -/************************************************************************* -This function generates random integer number in [0, N) +alglib_impl::kdtree* _kdtree_owner::c_ptr() +{ + return p_struct; +} -1. State structure must be initialized with HQRNDRandomize() or HQRNDSeed() -2. N can be any positive number except for very large numbers: - * close to 2^31 on 32-bit systems - * close to 2^62 on 64-bit systems - An exception will be generated if N is too large. +alglib_impl::kdtree* _kdtree_owner::c_ptr() const +{ + return const_cast(p_struct); +} +kdtree::kdtree() : _kdtree_owner() +{ +} - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -ae_int_t hqrnduniformi(const hqrndstate &state, const ae_int_t n) +kdtree::kdtree(const kdtree &rhs):_kdtree_owner(rhs) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::hqrnduniformi(const_cast(state.c_ptr()), n, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } } -/************************************************************************* -Random number generator: normal numbers +kdtree& kdtree::operator=(const kdtree &rhs) +{ + if( this==&rhs ) + return *this; + _kdtree_owner::operator=(rhs); + return *this; +} -This function generates one random number from normal distribution. -Its performance is equal to that of HQRNDNormal2() +kdtree::~kdtree() +{ +} -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey +/************************************************************************* +This function serializes data structure to string. 
+ +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. + But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. *************************************************************************/ -double hqrndnormal(const hqrndstate &state) +void kdtreeserialize(kdtree &obj, std::string &s_out) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::hqrndnormal(const_cast(state.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + alglib_impl::ae_int_t ssize; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_alloc_start(&serializer); + alglib_impl::kdtreealloc(&serializer, obj.c_ptr(), &state); + ssize = alglib_impl::ae_serializer_get_alloc_size(&serializer); + s_out.clear(); + s_out.reserve((size_t)(ssize+1)); + alglib_impl::ae_serializer_sstart_str(&serializer, &s_out); + alglib_impl::kdtreeserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_assert( s_out.length()<=(size_t)ssize, "ALGLIB: serialization integrity error", &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } - /************************************************************************* -Random number generator: random X and Y such that X^2+Y^2=1 - -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). - - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey +This function unserializes data structure from string. 
*************************************************************************/ -void hqrndunit2(const hqrndstate &state, double &x, double &y) +void kdtreeunserialize(const std::string &s_in, kdtree &obj) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - alglib_impl::hqrndunit2(const_cast(state.c_ptr()), &x, &y, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_ustart_str(&serializer, &s_in); + alglib_impl::kdtreeunserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } -/************************************************************************* -Random number generator: normal numbers -This function generates two independent random numbers from normal -distribution. Its performance is equal to that of HQRNDNormal() +/************************************************************************* +This function serializes data structure to C++ stream. -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). +Data stream generated by this function is same as string representation +generated by string version of serializer - alphanumeric characters, +dots, underscores, minus signs, which are grouped into words separated by +spaces and CR+LF. - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey +We recommend you to read comments on string version of serializer to find +out more about serialization of AlGLIB objects. 
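Taken together, the string and stream overloads above let a tree be saved and restored with a few lines of user code. A minimal sketch, assuming the tree is built with kdtreebuild() as documented further below; the point data and the use of std::stringstream are illustrative:

    #include "alglibmisc.h"
    #include <sstream>
    #include <string>
    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[0,0],[0,1],[1,0],[1,1]]";
        kdtree kdt, restored;
        kdtreebuild(xy, 4, 2, 0, 2, kdt);

        std::string buf;
        kdtreeserialize(kdt, buf);          // portable text representation
        kdtreeunserialize(buf, restored);   // rebuild an equivalent tree from it

        std::stringstream ss;               // the std::ostream/std::istream overloads
        kdtreeserialize(kdt, ss);           // produce the same text format
        kdtreeunserialize(ss, restored);
        return 0;
    }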
*************************************************************************/ -void hqrndnormal2(const hqrndstate &state, double &x1, double &x2) +void kdtreeserialize(kdtree &obj, std::ostream &s_out) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - alglib_impl::hqrndnormal2(const_cast(state.c_ptr()), &x1, &x2, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_alloc_start(&serializer); + alglib_impl::kdtreealloc(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_get_alloc_size(&serializer); // not actually needed, but we have to ask + alglib_impl::ae_serializer_sstart_stream(&serializer, &s_out); + alglib_impl::kdtreeserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); +} +/************************************************************************* +This function unserializes data structure from stream. +*************************************************************************/ +void kdtreeunserialize(const std::istream &s_in, kdtree &obj) +{ + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_ustart_stream(&serializer, &s_in); + alglib_impl::kdtreeunserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } /************************************************************************* -Random number generator: exponential distribution +KD-tree creation + +This subroutine creates KD-tree from set of X-values and optional Y-values + +INPUT PARAMETERS + XY - dataset, array[0..N-1,0..NX+NY-1]. + one row corresponds to one point. + first NX columns contain X-values, next NY (NY may be zero) + columns may contain associated Y-values + N - number of points, N>=0. + NX - space dimension, NX>=1. + NY - number of optional Y-values, NY>=0. + NormType- norm type: + * 0 denotes infinity-norm + * 1 denotes 1-norm + * 2 denotes 2-norm (Euclidean norm) + +OUTPUT PARAMETERS + KDT - KD-tree -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). + +NOTES + +1. KD-tree creation have O(N*logN) complexity and O(N*(2*NX+NY)) memory + requirements. +2. Although KD-trees may be used with any combination of N and NX, they + are more efficient than brute-force search only when N >> 4^NX. So they + are most useful in low-dimensional tasks (NX=2, NX=3). 
NX=1 is another + inefficient case, because simple binary search (without additional + structures) is much more efficient in such tasks than KD-trees. -- ALGLIB -- - Copyright 11.08.2007 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -double hqrndexponential(const hqrndstate &state, const double lambdav) +void kdtreebuild(const real_2d_array &xy, const ae_int_t n, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::hqrndexponential(const_cast(state.c_ptr()), lambdav, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -This function generates random number from discrete distribution given by -finite sample X. - -INPUT PARAMETERS - State - high quality random number generator, must be - initialized with HQRNDRandomize() or HQRNDSeed(). - X - finite sample - N - number of elements to use, N>=1 - -RESULT - this function returns one of the X[i] for random i=0..N-1 - - -- ALGLIB -- - Copyright 08.11.2011 by Bochkanov Sergey -*************************************************************************/ -double hqrnddiscrete(const hqrndstate &state, const real_1d_array &x, const ae_int_t n) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::hqrnddiscrete(const_cast(state.c_ptr()), const_cast(x.c_ptr()), n, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -This function generates random number from continuous distribution given -by finite sample X. - -INPUT PARAMETERS - State - high quality random number generator, must be - initialized with HQRNDRandomize() or HQRNDSeed(). - X - finite sample, array[N] (can be larger, in this case only - leading N elements are used). THIS ARRAY MUST BE SORTED BY - ASCENDING. - N - number of elements to use, N>=1 - -RESULT - this function returns random number from continuous distribution which - tries to approximate X as mush as possible. min(X)<=Result<=max(X). 
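A minimal sketch of how the two sampling routines described above are called; the sample values are illustrative, and note the ascending order that hqrndcontinuous requires:

    #include "alglibmisc.h"
    using namespace alglib;

    int main()
    {
        hqrndstate state;
        hqrndrandomize(state);                          // seed from the standard RNG
        real_1d_array sample = "[0.0, 1.5, 2.0, 4.0]";  // sorted ascending, as required
        double d = hqrnddiscrete(state, sample, 4);     // one of the four sample values
        double c = hqrndcontinuous(state, sample, 4);   // value in [0.0, 4.0]
        (void)d; (void)c;
        return 0;
    }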
- - -- ALGLIB -- - Copyright 08.11.2011 by Bochkanov Sergey -*************************************************************************/ -double hqrndcontinuous(const hqrndstate &state, const real_1d_array &x, const ae_int_t n) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::hqrndcontinuous(const_cast(state.c_ptr()), const_cast(x.c_ptr()), n, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* - -*************************************************************************/ -_kdtree_owner::_kdtree_owner() -{ - p_struct = (alglib_impl::kdtree*)alglib_impl::ae_malloc(sizeof(alglib_impl::kdtree), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_kdtree_init(p_struct, NULL); -} - -_kdtree_owner::_kdtree_owner(const _kdtree_owner &rhs) -{ - p_struct = (alglib_impl::kdtree*)alglib_impl::ae_malloc(sizeof(alglib_impl::kdtree), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_kdtree_init_copy(p_struct, const_cast(rhs.p_struct), NULL); -} - -_kdtree_owner& _kdtree_owner::operator=(const _kdtree_owner &rhs) -{ - if( this==&rhs ) - return *this; - alglib_impl::_kdtree_clear(p_struct); - alglib_impl::_kdtree_init_copy(p_struct, const_cast(rhs.p_struct), NULL); - return *this; -} - -_kdtree_owner::~_kdtree_owner() -{ - alglib_impl::_kdtree_clear(p_struct); - ae_free(p_struct); -} - -alglib_impl::kdtree* _kdtree_owner::c_ptr() -{ - return p_struct; -} - -alglib_impl::kdtree* _kdtree_owner::c_ptr() const -{ - return const_cast(p_struct); -} -kdtree::kdtree() : _kdtree_owner() -{ -} - -kdtree::kdtree(const kdtree &rhs):_kdtree_owner(rhs) -{ -} - -kdtree& kdtree::operator=(const kdtree &rhs) -{ - if( this==&rhs ) - return *this; - _kdtree_owner::operator=(rhs); - return *this; -} - -kdtree::~kdtree() -{ -} - - -/************************************************************************* -This function serializes data structure to string. - -Important properties of s_out: -* it contains alphanumeric characters, dots, underscores, minus signs -* these symbols are grouped into words, which are separated by spaces - and Windows-style (CR+LF) newlines -* although serializer uses spaces and CR+LF as separators, you can - replace any separator character by arbitrary combination of spaces, - tabs, Windows or Unix newlines. It allows flexible reformatting of - the string in case you want to include it into text or XML file. - But you should not insert separators into the middle of the "words" - nor you should change case of letters. -* s_out can be freely moved between 32-bit and 64-bit systems, little - and big endian machines, and so on. You can serialize structure on - 32-bit machine and unserialize it on 64-bit one (or vice versa), or - serialize it on SPARC and unserialize on x86. You can also - serialize it in C++ version of ALGLIB and unserialize in C# one, - and vice versa. 
-*************************************************************************/ -void kdtreeserialize(kdtree &obj, std::string &s_out) -{ - alglib_impl::ae_state state; - alglib_impl::ae_serializer serializer; - alglib_impl::ae_int_t ssize; - - alglib_impl::ae_state_init(&state); - try - { - alglib_impl::ae_serializer_init(&serializer); - alglib_impl::ae_serializer_alloc_start(&serializer); - alglib_impl::kdtreealloc(&serializer, obj.c_ptr(), &state); - ssize = alglib_impl::ae_serializer_get_alloc_size(&serializer); - s_out.clear(); - s_out.reserve((size_t)(ssize+1)); - alglib_impl::ae_serializer_sstart_str(&serializer, &s_out); - alglib_impl::kdtreeserialize(&serializer, obj.c_ptr(), &state); - alglib_impl::ae_serializer_stop(&serializer); - if( s_out.length()>(size_t)ssize ) - throw ap_error("ALGLIB: serialization integrity error"); - alglib_impl::ae_serializer_clear(&serializer); - alglib_impl::ae_state_clear(&state); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(state.error_msg); - } -} -/************************************************************************* -This function unserializes data structure from string. -*************************************************************************/ -void kdtreeunserialize(std::string &s_in, kdtree &obj) -{ - alglib_impl::ae_state state; - alglib_impl::ae_serializer serializer; - - alglib_impl::ae_state_init(&state); - try - { - alglib_impl::ae_serializer_init(&serializer); - alglib_impl::ae_serializer_ustart_str(&serializer, &s_in); - alglib_impl::kdtreeunserialize(&serializer, obj.c_ptr(), &state); - alglib_impl::ae_serializer_stop(&serializer); - alglib_impl::ae_serializer_clear(&serializer); - alglib_impl::ae_state_clear(&state); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(state.error_msg); - } -} - -/************************************************************************* -KD-tree creation - -This subroutine creates KD-tree from set of X-values and optional Y-values - -INPUT PARAMETERS - XY - dataset, array[0..N-1,0..NX+NY-1]. - one row corresponds to one point. - first NX columns contain X-values, next NY (NY may be zero) - columns may contain associated Y-values - N - number of points, N>=0. - NX - space dimension, NX>=1. - NY - number of optional Y-values, NY>=0. - NormType- norm type: - * 0 denotes infinity-norm - * 1 denotes 1-norm - * 2 denotes 2-norm (Euclidean norm) - -OUTPUT PARAMETERS - KDT - KD-tree - - -NOTES - -1. KD-tree creation have O(N*logN) complexity and O(N*(2*NX+NY)) memory - requirements. -2. Although KD-trees may be used with any combination of N and NX, they - are more efficient than brute-force search only when N >> 4^NX. So they - are most useful in low-dimensional tasks (NX=2, NX=3). NX=1 is another - inefficient case, because simple binary search (without additional - structures) is much more efficient in such tasks than KD-trees. 
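For illustration, both kdtreebuild() overloads shown in these hunks, the one taking an explicit N and the one deriving N from xy.rows(), can be called as below. The point data is illustrative, and the trailing xparams argument is assumed to receive a default value in the header, so it is omitted here:

    #include "alglibmisc.h"
    using namespace alglib;

    int main()
    {
        // Four 2-D points, each carrying one associated Y-value (NX=2, NY=1).
        real_2d_array xy = "[[0,0,10],[0,1,11],[1,0,12],[1,1,13]]";
        kdtree kdt;

        kdtreebuild(xy, 4, 2, 1, 2, kdt);   // explicit N=4, NX=2, NY=1, 2-norm
        kdtreebuild(xy, 2, 1, 2, kdt);      // N taken from xy.rows(); this overload is
                                            // compiled only when AE_NO_EXCEPTIONS is not set
        return 0;
    }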
- - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey -*************************************************************************/ -void kdtreebuild(const real_2d_array &xy, const ae_int_t n, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::kdtreebuild(const_cast(xy.c_ptr()), n, nx, ny, normtype, const_cast(kdt.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreebuild(const_cast(xy.c_ptr()), n, nx, ny, normtype, const_cast(kdt.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -597,25 +550,26 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreebuild(const real_2d_array &xy, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt) +#if !defined(AE_NO_EXCEPTIONS) +void kdtreebuild(const real_2d_array &xy, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; n = xy.rows(); alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::kdtreebuild(const_cast(xy.c_ptr()), n, nx, ny, normtype, const_cast(kdt.c_ptr()), &_alglib_env_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreebuild(const_cast(xy.c_ptr()), n, nx, ny, normtype, const_cast(kdt.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif /************************************************************************* KD-tree creation @@ -654,20 +608,26 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreebuildtagged(const real_2d_array &xy, const integer_1d_array &tags, const ae_int_t n, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt) +void kdtreebuildtagged(const real_2d_array &xy, const integer_1d_array &tags, const ae_int_t n, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::kdtreebuildtagged(const_cast(xy.c_ptr()), const_cast(tags.c_ptr()), n, nx, ny, normtype, const_cast(kdt.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + 
_ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreebuildtagged(const_cast(xy.c_ptr()), const_cast(tags.c_ptr()), n, nx, ny, normtype, const_cast(kdt.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -707,30 +667,86 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreebuildtagged(const real_2d_array &xy, const integer_1d_array &tags, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt) +#if !defined(AE_NO_EXCEPTIONS) +void kdtreebuildtagged(const real_2d_array &xy, const integer_1d_array &tags, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t n; if( (xy.rows()!=tags.length())) - throw ap_error("Error while calling 'kdtreebuildtagged': looks like one of arguments has wrong size"); + _ALGLIB_CPP_EXCEPTION("Error while calling 'kdtreebuildtagged': looks like one of arguments has wrong size"); n = xy.rows(); alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::kdtreebuildtagged(const_cast(xy.c_ptr()), const_cast(tags.c_ptr()), n, nx, ny, normtype, const_cast(kdt.c_ptr()), &_alglib_env_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreebuildtagged(const_cast(xy.c_ptr()), const_cast(tags.c_ptr()), n, nx, ny, normtype, const_cast(kdt.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} +#endif + +/************************************************************************* +This function creates buffer structure which can be used to perform +parallel KD-tree requests. + +KD-tree subpackage provides two sets of request functions - ones which use +internal buffer of KD-tree object (these functions are single-threaded +because they use same buffer, which can not shared between threads), and +ones which use external buffer. + +This function is used to initialize external buffer. + +INPUT PARAMETERS + KDT - KD-tree which is associated with newly created buffer + +OUTPUT PARAMETERS + Buf - external buffer. + + +IMPORTANT: KD-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use buffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. 
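
A short sketch of the tagged build documented above. Tag values and the query point are made up, and the trailing xparams argument is again left to its assumed header-side default.

    #include <iostream>
    #include "alglibmisc.h"

    int main()
    {
        // Each point carries an application-defined integer tag (values are made up).
        alglib::real_2d_array    xy   = "[[0,0],[0,1],[1,0],[1,1]]";
        alglib::integer_1d_array tags = "[10,11,12,13]";
        alglib::kdtree kdt;

        // Overload without N: the point count is taken from xy.rows(), and a size
        // mismatch between xy and tags is reported as an error, as in the code above.
        alglib::kdtreebuildtagged(xy, tags, 2 /* NX */, 0 /* NY */, 2 /* norm */, kdt);

        // Tags of the two points closest to (0.9, 0.8): expected 13, then 12.
        alglib::real_1d_array q = "[0.9,0.8]";
        alglib::ae_int_t found = alglib::kdtreequeryknn(kdt, q, 2);
        alglib::integer_1d_array nearest_tags;
        alglib::kdtreequeryresultstags(kdt, nearest_tags);
        std::cout << found << " neighbors, first tag " << nearest_tags[0] << std::endl;
        return 0;
    }
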
+ + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +void kdtreecreaterequestbuffer(const kdtree &kdt, kdtreerequestbuffer &buf, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreecreaterequestbuffer(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* K-NN query: K nearest neighbors +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryKNN() ("Ts" stands for "thread-safe"). + INPUT PARAMETERS KDT - KD-tree X - point, array[0..NX-1]. @@ -756,25 +772,37 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -ae_int_t kdtreequeryknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const bool selfmatch) +ae_int_t kdtreequeryknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const bool selfmatch, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::kdtreequeryknn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreequeryknn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* K-NN query: K nearest neighbors +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryKNN() ("Ts" stands for "thread-safe"). + INPUT PARAMETERS KDT - KD-tree X - point, array[0..NX-1]. 
@@ -800,33 +828,41 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -ae_int_t kdtreequeryknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k) +#if !defined(AE_NO_EXCEPTIONS) +ae_int_t kdtreequeryknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; bool selfmatch; selfmatch = true; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::kdtreequeryknn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, &_alglib_env_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreequeryknn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif /************************************************************************* -R-NN query: all points within R-sphere centered at X +K-NN query: K nearest neighbors, using external thread-local buffer. + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. INPUT PARAMETERS - KDT - KD-tree + KDT - kd-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. X - point, array[0..NX-1]. - R - radius of sphere (in corresponding norm), R>0 + K - number of neighbors to return, K>=1 SelfMatch - whether self-matches are allowed: * if True, nearest neighbor may be the point itself (if it exists in original dataset) @@ -835,42 +871,61 @@ * if not given, considered True RESULT - number of neighbors found, >=0 + number of actual neighbors found (either K or N, if K>N). This subroutine performs query and stores its result in the internal -structures of the KD-tree. You can use following subroutines to obtain -actual results: -* KDTreeQueryResultsX() to get X-values -* KDTreeQueryResultsXY() to get X- and Y-values -* KDTreeQueryResultsTags() to get tag values -* KDTreeQueryResultsDistances() to get distances +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. 
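
To make the buffered-vs-unbuffered distinction in the comments above concrete, a minimal single-threaded K-NN query that uses the tree's internal buffer, followed by the result readers named there (same header and xparams assumptions as before):

    #include <iostream>
    #include "alglibmisc.h"

    int main()
    {
        alglib::real_2d_array xy = "[[0,0],[0,1],[1,0],[1,1]]";
        alglib::kdtree kdt;
        alglib::kdtreebuild(xy, 2, 0, 2, kdt);

        // Two nearest neighbors of (0.1, 0.2); results are kept in the tree's
        // internal buffer, so this object must not be queried concurrently.
        alglib::real_1d_array q = "[0.1,0.2]";
        alglib::ae_int_t found = alglib::kdtreequeryknn(kdt, q, 2);

        alglib::real_2d_array nn;        // resized by the reader if too small
        alglib::real_1d_array dist;
        alglib::kdtreequeryresultsx(kdt, nn);
        alglib::kdtreequeryresultsdistances(kdt, dist);
        std::cout << found << " neighbors, closest at distance " << dist[0] << std::endl;
        return 0;
    }
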
-- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -ae_int_t kdtreequeryrnn(const kdtree &kdt, const real_1d_array &x, const double r, const bool selfmatch) +ae_int_t kdtreetsqueryknn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const ae_int_t k, const bool selfmatch, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::ae_int_t result = alglib_impl::kdtreequeryrnn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), r, selfmatch, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreetsqueryknn(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -R-NN query: all points within R-sphere centered at X +K-NN query: K nearest neighbors, using external thread-local buffer. + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. INPUT PARAMETERS - KDT - KD-tree + KDT - kd-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. X - point, array[0..NX-1]. - R - radius of sphere (in corresponding norm), R>0 + K - number of neighbors to return, K>=1 SelfMatch - whether self-matches are allowed: * if True, nearest neighbor may be the point itself (if it exists in original dataset) @@ -879,66 +934,77 @@ * if not given, considered True RESULT - number of neighbors found, >=0 + number of actual neighbors found (either K or N, if K>N). This subroutine performs query and stores its result in the internal -structures of the KD-tree. You can use following subroutines to obtain -actual results: -* KDTreeQueryResultsX() to get X-values -* KDTreeQueryResultsXY() to get X- and Y-values -* KDTreeQueryResultsTags() to get tag values -* KDTreeQueryResultsDistances() to get distances +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. 
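
The thread-safe variant documented above is meant to be driven through one kdtreerequestbuffer per thread. A sketch under the same header assumptions; the parameter order of kdtreetsqueryresultsx() is assumed by analogy with the non-Ts reader, since its declaration sits outside this hunk.

    #include <thread>
    #include <vector>
    #include "alglibmisc.h"

    int main()
    {
        alglib::real_2d_array xy = "[[0,0],[0,1],[1,0],[1,1],[0.5,0.5]]";
        alglib::kdtree kdt;
        alglib::kdtreebuild(xy, 2, 0, 2, kdt);

        // One request buffer per worker; the tree itself is only read.
        const int nworkers = 4;
        std::vector<alglib::kdtreerequestbuffer> bufs(nworkers);
        for(int i = 0; i < nworkers; i++)
            alglib::kdtreecreaterequestbuffer(kdt, bufs[i]);

        std::vector<std::thread> pool;
        for(int i = 0; i < nworkers; i++)
            pool.emplace_back([&kdt, &bufs, i]()
            {
                alglib::real_1d_array q = "[0.25,0.25]";
                alglib::ae_int_t found = alglib::kdtreetsqueryknn(kdt, bufs[i], q, 2);

                // Results live in this thread's buffer, not in the shared tree object.
                alglib::real_2d_array nn;
                alglib::kdtreetsqueryresultsx(kdt, bufs[i], nn);   // parameter order assumed
                (void)found;
            });
        for(std::thread &t : pool)
            t.join();
        return 0;
    }
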
-- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -ae_int_t kdtreequeryrnn(const kdtree &kdt, const real_1d_array &x, const double r) +#if !defined(AE_NO_EXCEPTIONS) +ae_int_t kdtreetsqueryknn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const ae_int_t k, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; bool selfmatch; selfmatch = true; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::kdtreequeryrnn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), r, selfmatch, &_alglib_env_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreetsqueryknn(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif /************************************************************************* -K-NN query: approximate K nearest neighbors +R-NN query: all points within R-sphere centered at X, ordered by distance +between point and X (by ascending). + +NOTE: it is also possible to perform undordered queries performed by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. Such queries + are faster because we do not have to use heap structure for sorting. + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: kdtreetsqueryrnn() ("Ts" stands for "thread-safe"). INPUT PARAMETERS KDT - KD-tree X - point, array[0..NX-1]. - K - number of neighbors to return, K>=1 + R - radius of sphere (in corresponding norm), R>0 SelfMatch - whether self-matches are allowed: * if True, nearest neighbor may be the point itself (if it exists in original dataset) * if False, then only points with non-zero distance are returned * if not given, considered True - Eps - approximation factor, Eps>=0. eps-approximate nearest - neighbor is a neighbor whose distance from X is at - most (1+eps) times distance of true nearest neighbor. RESULT - number of actual neighbors found (either K or N, if K>N). - -NOTES - significant performance gain may be achieved only when Eps is is on - the order of magnitude of 1 or larger. + number of neighbors found, >=0 This subroutine performs query and stores its result in the internal structures of the KD-tree. 
You can use following subroutines to obtain -these results: +actual results: * KDTreeQueryResultsX() to get X-values * KDTreeQueryResultsXY() to get X- and Y-values * KDTreeQueryResultsTags() to get tag values @@ -947,49 +1013,59 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -ae_int_t kdtreequeryaknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const bool selfmatch, const double eps) +ae_int_t kdtreequeryrnn(const kdtree &kdt, const real_1d_array &x, const double r, const bool selfmatch, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::ae_int_t result = alglib_impl::kdtreequeryaknn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, eps, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreequeryrnn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), r, selfmatch, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -K-NN query: approximate K nearest neighbors +R-NN query: all points within R-sphere centered at X, ordered by distance +between point and X (by ascending). + +NOTE: it is also possible to perform undordered queries performed by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. Such queries + are faster because we do not have to use heap structure for sorting. + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: kdtreetsqueryrnn() ("Ts" stands for "thread-safe"). INPUT PARAMETERS KDT - KD-tree X - point, array[0..NX-1]. - K - number of neighbors to return, K>=1 + R - radius of sphere (in corresponding norm), R>0 SelfMatch - whether self-matches are allowed: * if True, nearest neighbor may be the point itself (if it exists in original dataset) * if False, then only points with non-zero distance are returned * if not given, considered True - Eps - approximation factor, Eps>=0. eps-approximate nearest - neighbor is a neighbor whose distance from X is at - most (1+eps) times distance of true nearest neighbor. RESULT - number of actual neighbors found (either K or N, if K>N). - -NOTES - significant performance gain may be achieved only when Eps is is on - the order of magnitude of 1 or larger. + number of neighbors found, >=0 This subroutine performs query and stores its result in the internal structures of the KD-tree. 
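
A short sketch of the ordered R-NN query described above, with made-up points and radius (same header and xparams assumptions):

    #include <iostream>
    #include "alglibmisc.h"

    int main()
    {
        alglib::real_2d_array xy = "[[0,0],[1,0],[2,0],[3,0]]";
        alglib::kdtree kdt;
        alglib::kdtreebuild(xy, 2, 0, 2, kdt);

        // All points within radius 1.5 of (0,0); self-matches are allowed by
        // default, so (0,0) itself and (1,0) are returned, closest first.
        alglib::real_1d_array q = "[0,0]";
        alglib::ae_int_t found = alglib::kdtreequeryrnn(kdt, q, 1.5);

        alglib::real_1d_array dist;
        alglib::kdtreequeryresultsdistances(kdt, dist);   // ascending: 0.0, 1.0
        std::cout << found << " points inside the sphere" << std::endl;
        return 0;
    }
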
You can use following subroutines to obtain -these results: +actual results: * KDTreeQueryResultsX() to get X-values * KDTreeQueryResultsXY() to get X- and Y-values * KDTreeQueryResultsTags() to get tag values @@ -998,1135 +1074,2132 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -ae_int_t kdtreequeryaknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const double eps) +#if !defined(AE_NO_EXCEPTIONS) +ae_int_t kdtreequeryrnn(const kdtree &kdt, const real_1d_array &x, const double r, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; bool selfmatch; selfmatch = true; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::kdtreequeryaknn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, eps, &_alglib_env_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreequeryrnn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), r, selfmatch, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif /************************************************************************* -X-values from last query +R-NN query: all points within R-sphere centered at X, no ordering by +distance as undicated by "U" suffix (faster that ordered query, for large +queries - significantly faster). + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: kdtreetsqueryrnn() ("Ts" stands for "thread-safe"). INPUT PARAMETERS - KDT - KD-tree - X - possibly pre-allocated buffer. If X is too small to store - result, it is resized. If size(X) is enough to store - result, it is left unchanged. + KDT - KD-tree + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True -OUTPUT PARAMETERS - X - rows are filled with X-values +RESULT + number of neighbors found, >=0 -NOTES -1. points are ordered by distance from the query point (first = closest) -2. if XY is larger than required to store result, only leading part will - be overwritten; trailing part will be left unchanged. So if on input - XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get - XY = [[1,2],[C,D]]. This is done purposely to increase performance; if - you want function to resize array according to result size, use - function with same name and suffix 'I'. +This subroutine performs query and stores its result in the internal +structures of the KD-tree. 
You can use following subroutines to obtain +actual results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances -SEE ALSO -* KDTreeQueryResultsXY() X- and Y-values -* KDTreeQueryResultsTags() tag values -* KDTreeQueryResultsDistances() distances +As indicated by "U" suffix, this function returns unordered results. -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 01.11.2018 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsx(const kdtree &kdt, real_2d_array &x) +ae_int_t kdtreequeryrnnu(const kdtree &kdt, const real_1d_array &x, const double r, const bool selfmatch, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::kdtreequeryresultsx(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreequeryrnnu(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), r, selfmatch, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -X- and Y-values from last query +R-NN query: all points within R-sphere centered at X, no ordering by +distance as undicated by "U" suffix (faster that ordered query, for large +queries - significantly faster). + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: kdtreetsqueryrnn() ("Ts" stands for "thread-safe"). INPUT PARAMETERS - KDT - KD-tree - XY - possibly pre-allocated buffer. If XY is too small to store - result, it is resized. If size(XY) is enough to store - result, it is left unchanged. + KDT - KD-tree + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True -OUTPUT PARAMETERS - XY - rows are filled with points: first NX columns with - X-values, next NY columns - with Y-values. +RESULT + number of neighbors found, >=0 -NOTES -1. points are ordered by distance from the query point (first = closest) -2. if XY is larger than required to store result, only leading part will - be overwritten; trailing part will be left unchanged. So if on input - XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get - XY = [[1,2],[C,D]]. 
This is done purposely to increase performance; if - you want function to resize array according to result size, use - function with same name and suffix 'I'. +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +actual results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances -SEE ALSO -* KDTreeQueryResultsX() X-values -* KDTreeQueryResultsTags() tag values -* KDTreeQueryResultsDistances() distances +As indicated by "U" suffix, this function returns unordered results. -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 01.11.2018 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsxy(const kdtree &kdt, real_2d_array &xy) +#if !defined(AE_NO_EXCEPTIONS) +ae_int_t kdtreequeryrnnu(const kdtree &kdt, const real_1d_array &x, const double r, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + bool selfmatch; + + selfmatch = true; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::kdtreequeryresultsxy(const_cast(kdt.c_ptr()), const_cast(xy.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreequeryrnnu(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), r, selfmatch, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif /************************************************************************* -Tags from last query +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, sorted by distance between point and X (by ascending) + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +NOTE: it is also possible to perform undordered queries performed by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. Such queries + are faster because we do not have to use heap structure for sorting. INPUT PARAMETERS - KDT - KD-tree - Tags - possibly pre-allocated buffer. If X is too small to store - result, it is resized. If size(X) is enough to store - result, it is left unchanged. + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True -OUTPUT PARAMETERS - Tags - filled with tags associated with points, - or, when no tags were supplied, with zeros +RESULT + number of neighbors found, >=0 -NOTES -1. 
points are ordered by distance from the query point (first = closest) -2. if XY is larger than required to store result, only leading part will - be overwritten; trailing part will be left unchanged. So if on input - XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get - XY = [[1,2],[C,D]]. This is done purposely to increase performance; if - you want function to resize array according to result size, use - function with same name and suffix 'I'. +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances -SEE ALSO -* KDTreeQueryResultsX() X-values -* KDTreeQueryResultsXY() X- and Y-values -* KDTreeQueryResultsDistances() distances +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultstags(const kdtree &kdt, integer_1d_array &tags) +ae_int_t kdtreetsqueryrnn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const double r, const bool selfmatch, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::kdtreequeryresultstags(const_cast(kdt.c_ptr()), const_cast(tags.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreetsqueryrnn(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), r, selfmatch, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Distances from last query +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, sorted by distance between point and X (by ascending) -INPUT PARAMETERS - KDT - KD-tree - R - possibly pre-allocated buffer. If X is too small to store - result, it is resized. If size(X) is enough to store - result, it is left unchanged. +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. -OUTPUT PARAMETERS - R - filled with distances (in corresponding norm) +NOTE: it is also possible to perform undordered queries performed by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. 
Such queries + are faster because we do not have to use heap structure for sorting. -NOTES -1. points are ordered by distance from the query point (first = closest) -2. if XY is larger than required to store result, only leading part will - be overwritten; trailing part will be left unchanged. So if on input - XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get - XY = [[1,2],[C,D]]. This is done purposely to increase performance; if - you want function to resize array according to result size, use - function with same name and suffix 'I'. +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True -SEE ALSO -* KDTreeQueryResultsX() X-values -* KDTreeQueryResultsXY() X- and Y-values -* KDTreeQueryResultsTags() tag values +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. 
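
A minimal sketch of the buffered R-NN call documented above, reusing the one-buffer-per-thread pattern (a single buffer is used here for brevity); only the returned count is consumed, so no additional result-reader signatures need to be assumed.

    #include <iostream>
    #include "alglibmisc.h"

    int main()
    {
        alglib::real_2d_array xy = "[[0,0],[1,0],[0,1],[1,1]]";
        alglib::kdtree kdt;
        alglib::kdtreebuild(xy, 2, 0, 2, kdt);

        // A per-thread buffer; create one of these for each querying thread.
        alglib::kdtreerequestbuffer buf;
        alglib::kdtreecreaterequestbuffer(kdt, buf);

        alglib::real_1d_array q = "[0,0]";
        alglib::ae_int_t found = alglib::kdtreetsqueryrnn(kdt, buf, q, 1.1);
        std::cout << found << " points within radius 1.1" << std::endl;   // expected: 3
        return 0;
    }
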
-- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsdistances(const kdtree &kdt, real_1d_array &r) +#if !defined(AE_NO_EXCEPTIONS) +ae_int_t kdtreetsqueryrnn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const double r, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + bool selfmatch; + + selfmatch = true; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::kdtreequeryresultsdistances(const_cast(kdt.c_ptr()), const_cast(r.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreetsqueryrnn(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), r, selfmatch, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif /************************************************************************* -X-values from last query; 'interactive' variant for languages like Python -which support constructs like "X = KDTreeQueryResultsXI(KDT)" and -interactive mode of interpreter. +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, no ordering by distance as undicated by "U" suffix +(faster that ordered query, for large queries - significantly faster). + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +As indicated by "U" suffix, this function returns unordered results. + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. 
-- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsxi(const kdtree &kdt, real_2d_array &x) +ae_int_t kdtreetsqueryrnnu(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const double r, const bool selfmatch, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::kdtreequeryresultsxi(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreetsqueryrnnu(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), r, selfmatch, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -XY-values from last query; 'interactive' variant for languages like Python -which support constructs like "XY = KDTreeQueryResultsXYI(KDT)" and -interactive mode of interpreter. +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, no ordering by distance as undicated by "U" suffix +(faster that ordered query, for large queries - significantly faster). + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +As indicated by "U" suffix, this function returns unordered results. + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. 
-- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsxyi(const kdtree &kdt, real_2d_array &xy) +#if !defined(AE_NO_EXCEPTIONS) +ae_int_t kdtreetsqueryrnnu(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const double r, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + bool selfmatch; + + selfmatch = true; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::kdtreequeryresultsxyi(const_cast(kdt.c_ptr()), const_cast(xy.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreetsqueryrnnu(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), r, selfmatch, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif /************************************************************************* -Tags from last query; 'interactive' variant for languages like Python -which support constructs like "Tags = KDTreeQueryResultsTagsI(KDT)" and -interactive mode of interpreter. +K-NN query: approximate K nearest neighbors -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryAKNN() ("Ts" stands for "thread-safe"). + +INPUT PARAMETERS + KDT - KD-tree + X - point, array[0..NX-1]. + K - number of neighbors to return, K>=1 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + Eps - approximation factor, Eps>=0. eps-approximate nearest + neighbor is a neighbor whose distance from X is at + most (1+eps) times distance of true nearest neighbor. + +RESULT + number of actual neighbors found (either K or N, if K>N). + +NOTES + significant performance gain may be achieved only when Eps is is on + the order of magnitude of 1 or larger. + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. 
You can use following subroutines to obtain +these results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultstagsi(const kdtree &kdt, integer_1d_array &tags) +ae_int_t kdtreequeryaknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const bool selfmatch, const double eps, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::kdtreequeryresultstagsi(const_cast(kdt.c_ptr()), const_cast(tags.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreequeryaknn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, eps, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Distances from last query; 'interactive' variant for languages like Python -which support constructs like "R = KDTreeQueryResultsDistancesI(KDT)" -and interactive mode of interpreter. +K-NN query: approximate K nearest neighbors -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryAKNN() ("Ts" stands for "thread-safe"). + +INPUT PARAMETERS + KDT - KD-tree + X - point, array[0..NX-1]. + K - number of neighbors to return, K>=1 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + Eps - approximation factor, Eps>=0. eps-approximate nearest + neighbor is a neighbor whose distance from X is at + most (1+eps) times distance of true nearest neighbor. + +RESULT + number of actual neighbors found (either K or N, if K>N). + +NOTES + significant performance gain may be achieved only when Eps is is on + the order of magnitude of 1 or larger. + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. 
You can use following subroutines to obtain +these results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsdistancesi(const kdtree &kdt, real_1d_array &r) +#if !defined(AE_NO_EXCEPTIONS) +ae_int_t kdtreequeryaknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const double eps, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + bool selfmatch; + + selfmatch = true; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::kdtreequeryresultsdistancesi(const_cast(kdt.c_ptr()), const_cast(r.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreequeryaknn(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, eps, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif /************************************************************************* +K-NN query: approximate K nearest neighbors, using thread-local buffer. -*************************************************************************/ -_xdebugrecord1_owner::_xdebugrecord1_owner() -{ - p_struct = (alglib_impl::xdebugrecord1*)alglib_impl::ae_malloc(sizeof(alglib_impl::xdebugrecord1), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_xdebugrecord1_init(p_struct, NULL); -} +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. -_xdebugrecord1_owner::_xdebugrecord1_owner(const _xdebugrecord1_owner &rhs) -{ - p_struct = (alglib_impl::xdebugrecord1*)alglib_impl::ae_malloc(sizeof(alglib_impl::xdebugrecord1), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_xdebugrecord1_init_copy(p_struct, const_cast(rhs.p_struct), NULL); -} +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + K - number of neighbors to return, K>=1 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + Eps - approximation factor, Eps>=0. eps-approximate nearest + neighbor is a neighbor whose distance from X is at + most (1+eps) times distance of true nearest neighbor. 
-_xdebugrecord1_owner& _xdebugrecord1_owner::operator=(const _xdebugrecord1_owner &rhs) -{ - if( this==&rhs ) - return *this; - alglib_impl::_xdebugrecord1_clear(p_struct); - alglib_impl::_xdebugrecord1_init_copy(p_struct, const_cast(rhs.p_struct), NULL); - return *this; -} +RESULT + number of actual neighbors found (either K or N, if K>N). -_xdebugrecord1_owner::~_xdebugrecord1_owner() -{ - alglib_impl::_xdebugrecord1_clear(p_struct); - ae_free(p_struct); -} +NOTES + significant performance gain may be achieved only when Eps is is on + the order of magnitude of 1 or larger. -alglib_impl::xdebugrecord1* _xdebugrecord1_owner::c_ptr() -{ - return p_struct; -} +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances -alglib_impl::xdebugrecord1* _xdebugrecord1_owner::c_ptr() const -{ - return const_cast(p_struct); -} -xdebugrecord1::xdebugrecord1() : _xdebugrecord1_owner() ,i(p_struct->i),c(*((alglib::complex*)(&p_struct->c))),a(&p_struct->a) -{ -} +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -xdebugrecord1::xdebugrecord1(const xdebugrecord1 &rhs):_xdebugrecord1_owner(rhs) ,i(p_struct->i),c(*((alglib::complex*)(&p_struct->c))),a(&p_struct->a) + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreetsqueryaknn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const ae_int_t k, const bool selfmatch, const double eps, const xparams _xparams) { + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreetsqueryaknn(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, eps, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } -xdebugrecord1& xdebugrecord1::operator=(const xdebugrecord1 &rhs) -{ - if( this==&rhs ) - return *this; - _xdebugrecord1_owner::operator=(rhs); - return *this; -} +/************************************************************************* +K-NN query: approximate K nearest neighbors, using thread-local buffer. -xdebugrecord1::~xdebugrecord1() -{ -} +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. -/************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. 
+INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + K - number of neighbors to return, K>=1 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + Eps - approximation factor, Eps>=0. eps-approximate nearest + neighbor is a neighbor whose distance from X is at + most (1+eps) times distance of true nearest neighbor. -Creates and returns XDebugRecord1 structure: -* integer and complex fields of Rec1 are set to 1 and 1+i correspondingly -* array field of Rec1 is set to [2,3] +RESULT + number of actual neighbors found (either K or N, if K>N). + +NOTES + significant performance gain may be achieved only when Eps is is on + the order of magnitude of 1 or larger. + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -- ALGLIB -- - Copyright 27.05.2014 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -void xdebuginitrecord1(xdebugrecord1 &rec1) +#if !defined(AE_NO_EXCEPTIONS) +ae_int_t kdtreetsqueryaknn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const ae_int_t k, const double eps, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + bool selfmatch; + + selfmatch = true; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::xdebuginitrecord1(const_cast(rec1.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreetsqueryaknn(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), k, selfmatch, eps, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Box query: all points within user-specified box. -Counts number of True values in the boolean 1D array. +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. 
If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryBox() ("Ts" stands for "thread-safe"). + +INPUT PARAMETERS + KDT - KD-tree + BoxMin - lower bounds, array[0..NX-1]. + BoxMax - upper bounds, array[0..NX-1]. + + +RESULT + number of actual neighbors found (in [0,N]). + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +these results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() returns zeros for this request + +NOTE: this particular query returns unordered results, because there is no + meaningful way of ordering points. Furthermore, no 'distance' is + associated with points - it is either INSIDE or OUTSIDE (so request + for distances will return zeros). -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 14.05.2016 by Bochkanov Sergey *************************************************************************/ -ae_int_t xdebugb1count(const boolean_1d_array &a) +ae_int_t kdtreequerybox(const kdtree &kdt, const real_1d_array &boxmin, const real_1d_array &boxmax, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::xdebugb1count(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreequerybox(const_cast(kdt.c_ptr()), const_cast(boxmin.c_ptr()), const_cast(boxmax.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Box query: all points within user-specified box, using thread-local buffer. -Replace all values in array by NOT(a[i]). -Array is passed using "shared" convention. +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + BoxMin - lower bounds, array[0..NX-1]. + BoxMax - upper bounds, array[0..NX-1]. + +RESULT + number of actual neighbors found (in [0,N]). + +This subroutine performs query and stores its result in the internal +structures of the buffer object. 
You can use following subroutines to +obtain these results (pay attention to "ts" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() returns zeros for this query + +NOTE: this particular query returns unordered results, because there is no + meaningful way of ordering points. Furthermore, no 'distance' is + associated with points - it is either INSIDE or OUTSIDE (so request + for distances will return zeros). + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 14.05.2016 by Bochkanov Sergey *************************************************************************/ -void xdebugb1not(const boolean_1d_array &a) +ae_int_t kdtreetsquerybox(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &boxmin, const real_1d_array &boxmax, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::xdebugb1not(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::kdtreetsquerybox(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(boxmin.c_ptr()), const_cast(boxmax.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +X-values from last query. -Appends copy of array to itself. -Array is passed using "var" convention. +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsx(). + +INPUT PARAMETERS + KDT - KD-tree + X - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + X - rows are filled with X-values + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. 
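Back to the box queries wrapped just above: a small sketch of the single-threaded form, again assuming kdtreebuild() from this unit and illustrative data. As the comments state, the hits come back unordered and the distances getter reports zeros for this request type.

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    alglib::real_2d_array pts = "[[0,0],[0,1],[1,0],[2,2]]";
    alglib::kdtree kdt;
    alglib::kdtreebuild(pts, 2, 0, 2, kdt);

    // all points with 0<=x<=1 and 0<=y<=1
    alglib::real_1d_array lo = "[0,0]", hi = "[1,1]";
    alglib::ae_int_t found = alglib::kdtreequerybox(kdt, lo, hi);

    alglib::real_2d_array rx;
    alglib::kdtreequeryresultsx(kdt, rx);              // unordered hits
    std::printf("%d point(s) inside the box\n", (int)found);
    for(alglib::ae_int_t i=0; i<found; i++)
        std::printf("(%.0f,%.0f)\n", rx(i,0), rx(i,1));
    return 0;
}

The thread-safe form differs only in taking a kdtreerequestbuffer argument and reading results back through the Ts getters.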
+ +SEE ALSO +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void xdebugb1appendcopy(boolean_1d_array &a) +void kdtreequeryresultsx(const kdtree &kdt, real_2d_array &x, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugb1appendcopy(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreequeryresultsx(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +X- and Y-values from last query -Generate N-element array with even-numbered elements set to True. -Array is passed using "out" convention. +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsxy(). + +INPUT PARAMETERS + KDT - KD-tree + XY - possibly pre-allocated buffer. If XY is too small to store + result, it is resized. If size(XY) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + XY - rows are filled with points: first NX columns with + X-values, next NY columns - with Y-values. + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. 
+ +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void xdebugb1outeven(const ae_int_t n, boolean_1d_array &a) +void kdtreequeryresultsxy(const kdtree &kdt, real_2d_array &xy, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugb1outeven(n, const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreequeryresultsxy(const_cast(kdt.c_ptr()), const_cast(xy.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Tags from last query -Returns sum of elements in the array. +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultstags(). - -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey -*************************************************************************/ -ae_int_t xdebugi1sum(const integer_1d_array &a) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::xdebugi1sum(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} +INPUT PARAMETERS + KDT - KD-tree + Tags - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. -/************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +OUTPUT PARAMETERS + Tags - filled with tags associated with points, + or, when no tags were supplied, with zeros -Replace all values in array by -A[I] -Array is passed using "shared" convention. +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. 
+ +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void xdebugi1neg(const integer_1d_array &a) +void kdtreequeryresultstags(const kdtree &kdt, integer_1d_array &tags, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugi1neg(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreequeryresultstags(const_cast(kdt.c_ptr()), const_cast(tags.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Distances from last query -Appends copy of array to itself. -Array is passed using "var" convention. +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsdistances(). + +INPUT PARAMETERS + KDT - KD-tree + R - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + R - filled with distances (in corresponding norm) + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. 
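The reuse rule spelled out in the NOTES above (an output array that is already large enough is only partially overwritten) can be checked with a deliberately oversized buffer; the sketch below assumes kdtreebuild() from this unit and that the getters behave exactly as those notes describe.

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    alglib::real_2d_array pts = "[[0,0],[0,1],[1,0],[1,1]]";
    alglib::kdtree kdt;
    alglib::kdtreebuild(pts, 2, 0, 2, kdt);

    alglib::real_1d_array q = "[0,0]";
    alglib::ae_int_t found = alglib::kdtreequeryaknn(kdt, q, 2, 0.0);
    std::printf("found=%d\n", (int)found);

    // oversized, pre-filled output: only the leading 'found' entries
    // should be overwritten, the trailing sentinels should survive
    alglib::real_1d_array rd = "[-1,-1,-1,-1]";
    alglib::kdtreequeryresultsdistances(kdt, rd);
    for(int i=0; i<4; i++)
        std::printf("rd[%d]=%.3f\n", i, rd(i));
    return 0;
}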
+ +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void xdebugi1appendcopy(integer_1d_array &a) +void kdtreequeryresultsdistances(const kdtree &kdt, real_1d_array &r, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugi1appendcopy(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreequeryresultsdistances(const_cast(kdt.c_ptr()), const_cast(r.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +X-values from last query associated with kdtreerequestbuffer object. -Generate N-element array with even-numbered A[I] set to I, and odd-numbered -ones set to 0. +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. + X - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. -Array is passed using "out" convention. +OUTPUT PARAMETERS + X - rows are filled with X-values + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. 
+ +SEE ALSO +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void xdebugi1outeven(const ae_int_t n, integer_1d_array &a) +void kdtreetsqueryresultsx(const kdtree &kdt, const kdtreerequestbuffer &buf, real_2d_array &x, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugi1outeven(n, const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreetsqueryresultsx(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +X- and Y-values from last query associated with kdtreerequestbuffer object. -Returns sum of elements in the array. +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. + XY - possibly pre-allocated buffer. If XY is too small to store + result, it is resized. If size(XY) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + XY - rows are filled with points: first NX columns with + X-values, next NY columns - with Y-values. + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. 
+ +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -double xdebugr1sum(const real_1d_array &a) +void kdtreetsqueryresultsxy(const kdtree &kdt, const kdtreerequestbuffer &buf, real_2d_array &xy, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::xdebugr1sum(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreetsqueryresultsxy(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(xy.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Tags from last query associated with kdtreerequestbuffer object. -Replace all values in array by -A[I] -Array is passed using "shared" convention. +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - KDTreeTsqueryresultstags(). + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. + Tags - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + Tags - filled with tags associated with points, + or, when no tags were supplied, with zeros + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. 
+ +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void xdebugr1neg(const real_1d_array &a) +void kdtreetsqueryresultstags(const kdtree &kdt, const kdtreerequestbuffer &buf, integer_1d_array &tags, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugr1neg(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreetsqueryresultstags(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(tags.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Distances from last query associated with kdtreerequestbuffer object. -Appends copy of array to itself. -Array is passed using "var" convention. +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - KDTreeTsqueryresultsdistances(). + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. + R - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + R - filled with distances (in corresponding norm) + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. 
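Taken together, the IMPORTANT notes on the buffered getters above amount to a simple rule: one kdtreerequestbuffer per thread, each created from the very kd-tree it will be used with. A possible C++11 pattern is sketched below; the threading code is our own scaffolding, not part of ALGLIB, and it assumes kdtreebuild() from this unit and that creating a request buffer only reads the shared, immutable tree.

#include <cstdio>
#include <thread>
#include <vector>
#include "alglibmisc.h"

int main()
{
    alglib::real_2d_array pts = "[[0,0],[0,1],[1,0],[1,1]]";
    alglib::kdtree kdt;
    alglib::kdtreebuild(pts, 2, 0, 2, kdt);

    auto worker = [&kdt](double qx, double qy)
    {
        // per-thread buffer, created from the same tree it is used with
        alglib::kdtreerequestbuffer buf;
        alglib::kdtreecreaterequestbuffer(kdt, buf);

        alglib::real_1d_array q;
        q.setlength(2);
        q(0) = qx;
        q(1) = qy;

        alglib::ae_int_t found = alglib::kdtreetsqueryaknn(kdt, buf, q, 1, true, 0.0);
        alglib::real_1d_array rd;
        alglib::kdtreetsqueryresultsdistances(kdt, buf, rd);
        if( found>0 )
            std::printf("(%.1f,%.1f): nearest at distance %.3f\n", qx, qy, rd(0));
    };

    std::vector<std::thread> pool;
    pool.emplace_back(worker, 0.1, 0.1);
    pool.emplace_back(worker, 0.9, 0.8);
    for(auto &t : pool)
        t.join();
    return 0;
}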
+ +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void xdebugr1appendcopy(real_1d_array &a) +void kdtreetsqueryresultsdistances(const kdtree &kdt, const kdtreerequestbuffer &buf, real_1d_array &r, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugr1appendcopy(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreetsqueryresultsdistances(const_cast(kdt.c_ptr()), const_cast(buf.c_ptr()), const_cast(r.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. - -Generate N-element array with even-numbered A[I] set to I*0.25, -and odd-numbered ones are set to 0. +X-values from last query; 'interactive' variant for languages like Python +which support constructs like "X = KDTreeQueryResultsXI(KDT)" and +interactive mode of interpreter. -Array is passed using "out" convention. +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void xdebugr1outeven(const ae_int_t n, real_1d_array &a) +void kdtreequeryresultsxi(const kdtree &kdt, real_2d_array &x, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugr1outeven(n, const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreequeryresultsxi(const_cast(kdt.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. 
+XY-values from last query; 'interactive' variant for languages like Python +which support constructs like "XY = KDTreeQueryResultsXYI(KDT)" and +interactive mode of interpreter. -Returns sum of elements in the array. +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -alglib::complex xdebugc1sum(const complex_1d_array &a) +void kdtreequeryresultsxyi(const kdtree &kdt, real_2d_array &xy, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::ae_complex result = alglib_impl::xdebugc1sum(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreequeryresultsxyi(const_cast(kdt.c_ptr()), const_cast(xy.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Tags from last query; 'interactive' variant for languages like Python +which support constructs like "Tags = KDTreeQueryResultsTagsI(KDT)" and +interactive mode of interpreter. -Replace all values in array by -A[I] -Array is passed using "shared" convention. +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. 
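The 'interactive' I-suffixed getters above trade speed for convenience: each call allocates a fresh output array, presumably sized to the result, instead of reusing a caller-supplied one. A short contrast, assuming kdtreebuild() as before:

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    alglib::real_2d_array pts = "[[0,0],[0,1],[1,0],[1,1]]";
    alglib::kdtree kdt;
    alglib::kdtreebuild(pts, 2, 0, 2, kdt);

    alglib::real_1d_array q = "[0.4,0.4]";
    alglib::kdtreequeryaknn(kdt, q, 3, 0.0);

    // interactive getter: a new array comes back on every call
    alglib::real_1d_array di;
    alglib::kdtreequeryresultsdistancesi(kdt, di);
    std::printf("interactive getter returned %d distance(s)\n", (int)di.length());

    // the plain kdtreequeryresultsdistances() would instead have reused
    // (and only partially overwritten) a pre-allocated buffer
    return 0;
}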
-- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void xdebugc1neg(const complex_1d_array &a) +void kdtreequeryresultstagsi(const kdtree &kdt, integer_1d_array &tags, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugc1neg(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreequeryresultstagsi(const_cast(kdt.c_ptr()), const_cast(tags.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Distances from last query; 'interactive' variant for languages like Python +which support constructs like "R = KDTreeQueryResultsDistancesI(KDT)" +and interactive mode of interpreter. -Appends copy of array to itself. -Array is passed using "var" convention. +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void xdebugc1appendcopy(complex_1d_array &a) +void kdtreequeryresultsdistancesi(const kdtree &kdt, real_1d_array &r, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugc1appendcopy(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kdtreequeryresultsdistancesi(const_cast(kdt.c_ptr()), const_cast(r.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif +#if defined(AE_COMPILE_HQRND) || !defined(AE_PARTIAL_BUILD) /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. - -Generate N-element array with even-numbered A[K] set to (x,y) = (K*0.25, K*0.125) -and odd-numbered ones are set to 0. - -Array is passed using "out" convention. +Portable high quality random number generator state. +Initialized with HQRNDRandomize() or HQRNDSeed(). 
- -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey +Fields: + S1, S2 - seed values + V - precomputed value + MagicV - 'magic' value used to determine whether State structure + was correctly initialized. *************************************************************************/ -void xdebugc1outeven(const ae_int_t n, complex_1d_array &a) +_hqrndstate_owner::_hqrndstate_owner() { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::xdebugc1outeven(n, const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); + if( p_struct!=NULL ) + { + alglib_impl::_hqrndstate_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::hqrndstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::hqrndstate), &_state); + memset(p_struct, 0, sizeof(alglib_impl::hqrndstate)); + alglib_impl::_hqrndstate_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_hqrndstate_owner::_hqrndstate_owner(const _hqrndstate_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); + if( p_struct!=NULL ) + { + alglib_impl::_hqrndstate_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: hqrndstate copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::hqrndstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::hqrndstate), &_state); + memset(p_struct, 0, sizeof(alglib_impl::hqrndstate)); + alglib_impl::_hqrndstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } -/************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. - -Counts number of True values in the boolean 2D array. 
- - -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey -*************************************************************************/ -ae_int_t xdebugb2count(const boolean_2d_array &a) +_hqrndstate_owner& _hqrndstate_owner::operator=(const _hqrndstate_owner &rhs) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::ae_int_t result = alglib_impl::xdebugb2count(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: hqrndstate assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: hqrndstate assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_hqrndstate_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::hqrndstate)); + alglib_impl::_hqrndstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + +_hqrndstate_owner::~_hqrndstate_owner() +{ + if( p_struct!=NULL ) { - throw ap_error(_alglib_env_state.error_msg); + alglib_impl::_hqrndstate_destroy(p_struct); + ae_free(p_struct); } } -/************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +alglib_impl::hqrndstate* _hqrndstate_owner::c_ptr() +{ + return p_struct; +} -Replace all values in array by NOT(a[i]). -Array is passed using "shared" convention. +alglib_impl::hqrndstate* _hqrndstate_owner::c_ptr() const +{ + return const_cast(p_struct); +} +hqrndstate::hqrndstate() : _hqrndstate_owner() +{ +} + +hqrndstate::hqrndstate(const hqrndstate &rhs):_hqrndstate_owner(rhs) +{ +} + +hqrndstate& hqrndstate::operator=(const hqrndstate &rhs) +{ + if( this==&rhs ) + return *this; + _hqrndstate_owner::operator=(rhs); + return *this; +} + +hqrndstate::~hqrndstate() +{ +} + +/************************************************************************* +HQRNDState initialization with random values which come from standard +RNG. 
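The owner-class boilerplate above makes hqrndstate a copyable value type; assuming the init_copy call clones the whole generator state (seeds and precomputed value), a copied state should continue with exactly the sequence its source would have produced, which can be handy for replaying a stream:

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    alglib::hqrndstate a;
    alglib::hqrndseed(1, 2, a);

    alglib::hqrndstate b = a;          // deep copy via the owner class above
    double ra = alglib::hqrnduniformr(a);
    double rb = alglib::hqrnduniformr(b);
    std::printf("%.10f %.10f (expected to match)\n", ra, rb);
    return 0;
}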
-- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -void xdebugb2not(const boolean_2d_array &a) +void hqrndrandomize(hqrndstate &state, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugb2not(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::hqrndrandomize(const_cast(state.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. - -Transposes array. -Array is passed using "var" convention. +HQRNDState initialization with seed values -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -void xdebugb2transpose(boolean_2d_array &a) +void hqrndseed(const ae_int_t s1, const ae_int_t s2, hqrndstate &state, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugb2transpose(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::hqrndseed(s1, s2, const_cast(state.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +This function generates random real number in (0,1), +not including interval boundaries -Generate MxN matrix with elements set to "Sin(3*I+5*J)>0" -Array is passed using "out" convention. +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). 
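A minimal sketch of the two initialization paths wrapped above: HQRNDSeed() yields a reproducible stream, HQRNDRandomize() a non-deterministic one. Seed values are arbitrary.

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    // reproducible: the same (s1,s2) always gives the same sequence
    alglib::hqrndstate rep;
    alglib::hqrndseed(12345, 67890, rep);

    // non-reproducible: seeded from the standard RNG
    alglib::hqrndstate rnd;
    alglib::hqrndrandomize(rnd);

    for(int i=0; i<3; i++)
        std::printf("%.6f  %.6f\n",
                    alglib::hqrnduniformr(rep),   // in (0,1), boundaries excluded
                    alglib::hqrnduniformr(rnd));
    return 0;
}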
-- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -void xdebugb2outsin(const ae_int_t m, const ae_int_t n, boolean_2d_array &a) +double hqrnduniformr(const hqrndstate &state, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::xdebugb2outsin(m, n, const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::hqrnduniformr(const_cast(state.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +This function generates random integer number in [0, N) -Returns sum of elements in the array. +1. State structure must be initialized with HQRNDRandomize() or HQRNDSeed() +2. N can be any positive number except for very large numbers: + * close to 2^31 on 32-bit systems + * close to 2^62 on 64-bit systems + An exception will be generated if N is too large. -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -ae_int_t xdebugi2sum(const integer_2d_array &a) +ae_int_t hqrnduniformi(const hqrndstate &state, const ae_int_t n, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::xdebugi2sum(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::hqrnduniformi(const_cast(state.c_ptr()), n, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Random number generator: normal numbers -Replace all values in array by -a[i,j] -Array is passed using "shared" convention. +This function generates one random number from normal distribution. +Its performance is equal to that of HQRNDNormal2() + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). 
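Because HQRNDUniformI() above returns an unbiased integer in [0,N), it can drive index selection directly; the Fisher-Yates shuffle below is our own illustrative helper, not an ALGLIB routine.

#include <cstdio>
#include <utility>
#include <vector>
#include "alglibmisc.h"

// in-place shuffle that draws its indices from hqrnduniformi()
static void shuffle_with_hqrnd(std::vector<int> &v, alglib::hqrndstate &state)
{
    for(int i=(int)v.size()-1; i>0; i--)
    {
        alglib::ae_int_t j = alglib::hqrnduniformi(state, i+1);   // j in [0,i]
        std::swap(v[i], v[(int)j]);
    }
}

int main()
{
    alglib::hqrndstate state;
    alglib::hqrndseed(7, 11, state);

    std::vector<int> v = {0, 1, 2, 3, 4, 5};
    shuffle_with_hqrnd(v, state);
    for(int x : v)
        std::printf("%d ", x);
    std::printf("\n");
    return 0;
}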
-- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -void xdebugi2neg(const integer_2d_array &a) +double hqrndnormal(const hqrndstate &state, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::xdebugi2neg(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::hqrndnormal(const_cast(state.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Random number generator: random X and Y such that X^2+Y^2=1 -Transposes array. -Array is passed using "var" convention. +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey *************************************************************************/ -void xdebugi2transpose(integer_2d_array &a) +void hqrndunit2(const hqrndstate &state, double &x, double &y, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugi2transpose(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::hqrndunit2(const_cast(state.c_ptr()), &x, &y, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This is debug function intended for testing ALGLIB interface generator. -Never use it in any real life project. +Random number generator: normal numbers -Generate MxN matrix with elements set to "Sign(Sin(3*I+5*J))" -Array is passed using "out" convention. +This function generates two independent random numbers from normal +distribution. Its performance is equal to that of HQRNDNormal() + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). 
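A short sketch of the Gaussian and unit-circle generators wrapped above (the two-at-a-time HQRNDNormal2() variant is documented next); the printed x^2+y^2 should come out as 1 up to rounding.

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    alglib::hqrndstate state;
    alglib::hqrndseed(3, 5, state);

    double g = alglib::hqrndnormal(state);        // one N(0,1) deviate
    double x, y;
    alglib::hqrndunit2(state, x, y);              // random point on the unit circle
    std::printf("normal=%.6f  unit=(%.6f,%.6f)  x^2+y^2=%.6f\n",
                g, x, y, x*x + y*y);
    return 0;
}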
-- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +void hqrndnormal2(const hqrndstate &state, double &x1, double &x2, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::hqrndnormal2(const_cast(state.c_ptr()), &x1, &x2, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +Random number generator: exponential distribution + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). + + -- ALGLIB -- + Copyright 11.08.2007 by Bochkanov Sergey +*************************************************************************/ +double hqrndexponential(const hqrndstate &state, const double lambdav, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::hqrndexponential(const_cast(state.c_ptr()), lambdav, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +This function generates random number from discrete distribution given by +finite sample X. + +INPUT PARAMETERS + State - high quality random number generator, must be + initialized with HQRNDRandomize() or HQRNDSeed(). + X - finite sample + N - number of elements to use, N>=1 + +RESULT + this function returns one of the X[i] for random i=0..N-1 + + -- ALGLIB -- + Copyright 08.11.2011 by Bochkanov Sergey +*************************************************************************/ +double hqrnddiscrete(const hqrndstate &state, const real_1d_array &x, const ae_int_t n, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::hqrnddiscrete(const_cast(state.c_ptr()), const_cast(x.c_ptr()), n, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +This function generates random number from continuous distribution given +by finite sample X. 
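The empirical samplers wrapped above can be driven as follows; the sample array must be sorted ascending for HQRNDContinuous(), per the parameter description that follows, and all values here are illustrative.

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    alglib::hqrndstate state;
    alglib::hqrndseed(17, 19, state);

    // finite sample, sorted ascending (required by hqrndcontinuous)
    alglib::real_1d_array sample = "[0.0,0.5,1.5,4.0]";

    double d = alglib::hqrnddiscrete(state, sample, 4);    // one of the sample values
    double c = alglib::hqrndcontinuous(state, sample, 4);  // in [min(sample),max(sample)]
    double e = alglib::hqrndexponential(state, 2.0);       // exponential with lambda=2
    std::printf("discrete=%.3f  continuous=%.3f  exponential=%.3f\n", d, c, e);
    return 0;
}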
+ +INPUT PARAMETERS + State - high quality random number generator, must be + initialized with HQRNDRandomize() or HQRNDSeed(). + X - finite sample, array[N] (can be larger, in this case only + leading N elements are used). THIS ARRAY MUST BE SORTED BY + ASCENDING. + N - number of elements to use, N>=1 + +RESULT + this function returns random number from continuous distribution which + tries to approximate X as mush as possible. min(X)<=Result<=max(X). + + -- ALGLIB -- + Copyright 08.11.2011 by Bochkanov Sergey *************************************************************************/ -void xdebugi2outsin(const ae_int_t m, const ae_int_t n, integer_2d_array &a) +double hqrndcontinuous(const hqrndstate &state, const real_1d_array &x, const ae_int_t n, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::hqrndcontinuous(const_cast(state.c_ptr()), const_cast(x.c_ptr()), n, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} +#endif + +#if defined(AE_COMPILE_XDEBUG) || !defined(AE_PARTIAL_BUILD) +/************************************************************************* + +*************************************************************************/ +_xdebugrecord1_owner::_xdebugrecord1_owner() +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_xdebugrecord1_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::xdebugrecord1*)alglib_impl::ae_malloc(sizeof(alglib_impl::xdebugrecord1), &_state); + memset(p_struct, 0, sizeof(alglib_impl::xdebugrecord1)); + alglib_impl::_xdebugrecord1_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_xdebugrecord1_owner::_xdebugrecord1_owner(const _xdebugrecord1_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::xdebugi2outsin(m, n, const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); + if( p_struct!=NULL ) + { + alglib_impl::_xdebugrecord1_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: xdebugrecord1 copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::xdebugrecord1*)alglib_impl::ae_malloc(sizeof(alglib_impl::xdebugrecord1), &_state); + memset(p_struct, 0, sizeof(alglib_impl::xdebugrecord1)); + alglib_impl::_xdebugrecord1_init_copy(p_struct, 
const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} + +_xdebugrecord1_owner& _xdebugrecord1_owner::operator=(const _xdebugrecord1_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: xdebugrecord1 assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: xdebugrecord1 assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_xdebugrecord1_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::xdebugrecord1)); + alglib_impl::_xdebugrecord1_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + +_xdebugrecord1_owner::~_xdebugrecord1_owner() +{ + if( p_struct!=NULL ) { - throw ap_error(_alglib_env_state.error_msg); + alglib_impl::_xdebugrecord1_destroy(p_struct); + ae_free(p_struct); } } +alglib_impl::xdebugrecord1* _xdebugrecord1_owner::c_ptr() +{ + return p_struct; +} + +alglib_impl::xdebugrecord1* _xdebugrecord1_owner::c_ptr() const +{ + return const_cast(p_struct); +} +xdebugrecord1::xdebugrecord1() : _xdebugrecord1_owner() ,i(p_struct->i),c(*((alglib::complex*)(&p_struct->c))),a(&p_struct->a) +{ +} + +xdebugrecord1::xdebugrecord1(const xdebugrecord1 &rhs):_xdebugrecord1_owner(rhs) ,i(p_struct->i),c(*((alglib::complex*)(&p_struct->c))),a(&p_struct->a) +{ +} + +xdebugrecord1& xdebugrecord1::operator=(const xdebugrecord1 &rhs) +{ + if( this==&rhs ) + return *this; + _xdebugrecord1_owner::operator=(rhs); + return *this; +} + +xdebugrecord1::~xdebugrecord1() +{ +} + /************************************************************************* This is debug function intended for testing ALGLIB interface generator. Never use it in any real life project. -Returns sum of elements in the array. 
+Creates and returns XDebugRecord1 structure: +* integer and complex fields of Rec1 are set to 1 and 1+i correspondingly +* array field of Rec1 is set to [2,3] -- ALGLIB -- - Copyright 11.10.2013 by Bochkanov Sergey + Copyright 27.05.2014 by Bochkanov Sergey *************************************************************************/ -double xdebugr2sum(const real_2d_array &a) +void xdebuginitrecord1(xdebugrecord1 &rec1, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::xdebugr2sum(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } - catch(alglib_impl::ae_error_type) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebuginitrecord1(const_cast(rec1.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. + +Counts number of True values in the boolean 1D array. + + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +ae_int_t xdebugb1count(const boolean_1d_array &a, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::xdebugb1count(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* This is debug function intended for testing ALGLIB interface generator. Never use it in any real life project. -Replace all values in array by -a[i,j] +Replace all values in array by NOT(a[i]). Array is passed using "shared" convention. 
-- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugr2neg(const real_2d_array &a) +void xdebugb1not(const boolean_1d_array &a, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugr2neg(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugb1not(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* This is debug function intended for testing ALGLIB interface generator. Never use it in any real life project. -Transposes array. +Appends copy of array to itself. Array is passed using "var" convention. -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugr2transpose(real_2d_array &a) +void xdebugb1appendcopy(boolean_1d_array &a, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugr2transpose(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugb1appendcopy(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* This is debug function intended for testing ALGLIB interface generator. Never use it in any real life project. -Generate MxN matrix with elements set to "Sin(3*I+5*J)" +Generate N-element array with even-numbered elements set to True. Array is passed using "out" convention. 
-- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugr2outsin(const ae_int_t m, const ae_int_t n, real_2d_array &a) +void xdebugb1outeven(const ae_int_t n, boolean_1d_array &a, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugr2outsin(m, n, const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugb1outeven(n, const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -2138,653 +3211,999 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -alglib::complex xdebugc2sum(const complex_2d_array &a) +ae_int_t xdebugi1sum(const integer_1d_array &a, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_complex result = alglib_impl::xdebugc2sum(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::xdebugi1sum(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* This is debug function intended for testing ALGLIB interface generator. Never use it in any real life project. -Replace all values in array by -a[i,j] +Replace all values in array by -A[I] Array is passed using "shared" convention. 
-- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugc2neg(const complex_2d_array &a) +void xdebugi1neg(const integer_1d_array &a, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugc2neg(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugi1neg(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* This is debug function intended for testing ALGLIB interface generator. Never use it in any real life project. -Transposes array. +Appends copy of array to itself. Array is passed using "var" convention. -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugc2transpose(complex_2d_array &a) +void xdebugi1appendcopy(integer_1d_array &a, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugc2transpose(const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugi1appendcopy(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* This is debug function intended for testing ALGLIB interface generator. Never use it in any real life project. -Generate MxN matrix with elements set to "Sin(3*I+5*J),Cos(3*I+5*J)" +Generate N-element array with even-numbered A[I] set to I, and odd-numbered +ones set to 0. + Array is passed using "out" convention. 
-- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugc2outsincos(const ae_int_t m, const ae_int_t n, complex_2d_array &a) +void xdebugi1outeven(const ae_int_t n, integer_1d_array &a, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::xdebugc2outsincos(m, n, const_cast(a.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugi1outeven(n, const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* This is debug function intended for testing ALGLIB interface generator. Never use it in any real life project. -Returns sum of a[i,j]*(1+b[i,j]) such that c[i,j] is True +Returns sum of elements in the array. -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -double xdebugmaskedbiasedproductsum(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const real_2d_array &b, const boolean_2d_array &c) +double xdebugr1sum(const real_1d_array &a, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::xdebugmaskedbiasedproductsum(m, n, const_cast(a.c_ptr()), const_cast(b.c_ptr()), const_cast(c.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - catch(alglib_impl::ae_error_type) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::xdebugr1sum(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. + +Replace all values in array by -A[I] +Array is passed using "shared" convention. 
+ + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +void xdebugr1neg(const real_1d_array &a, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } -} + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugr1neg(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } -///////////////////////////////////////////////////////////////////////// -// -// THIS SECTION CONTAINS IMPLEMENTATION OF COMPUTATIONAL CORE -// -///////////////////////////////////////////////////////////////////////// -namespace alglib_impl -{ -static ae_int_t hqrnd_hqrndmax = 2147483561; -static ae_int_t hqrnd_hqrndm1 = 2147483563; -static ae_int_t hqrnd_hqrndm2 = 2147483399; -static ae_int_t hqrnd_hqrndmagic = 1634357784; -static ae_int_t hqrnd_hqrndintegerbase(hqrndstate* state, - ae_state *_state); +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. +Appends copy of array to itself. +Array is passed using "var" convention. -static ae_int_t nearestneighbor_splitnodesize = 6; -static ae_int_t nearestneighbor_kdtreefirstversion = 0; -static void nearestneighbor_kdtreesplit(kdtree* kdt, - ae_int_t i1, - ae_int_t i2, - ae_int_t d, - double s, - ae_int_t* i3, - ae_state *_state); -static void nearestneighbor_kdtreegeneratetreerec(kdtree* kdt, - ae_int_t* nodesoffs, - ae_int_t* splitsoffs, - ae_int_t i1, - ae_int_t i2, - ae_int_t maxleafsize, - ae_state *_state); -static void nearestneighbor_kdtreequerynnrec(kdtree* kdt, - ae_int_t offs, - ae_state *_state); -static void nearestneighbor_kdtreeinitbox(kdtree* kdt, - /* Real */ ae_vector* x, - ae_state *_state); -static void nearestneighbor_kdtreeallocdatasetindependent(kdtree* kdt, - ae_int_t nx, - ae_int_t ny, - ae_state *_state); -static void nearestneighbor_kdtreeallocdatasetdependent(kdtree* kdt, - ae_int_t n, - ae_int_t nx, - ae_int_t ny, - ae_state *_state); -static void nearestneighbor_kdtreealloctemporaries(kdtree* kdt, - ae_int_t n, - ae_int_t nx, - ae_int_t ny, - ae_state *_state); + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +void xdebugr1appendcopy(real_1d_array &a, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugr1appendcopy(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This is 
debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. +Generate N-element array with even-numbered A[I] set to I*0.25, +and odd-numbered ones are set to 0. +Array is passed using "out" convention. + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +void xdebugr1outeven(const ae_int_t n, real_1d_array &a, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugr1outeven(n, const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. +Returns sum of elements in the array. + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +alglib::complex xdebugc1sum(const complex_1d_array &a, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_complex result = alglib_impl::xdebugc1sum(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} /************************************************************************* -HQRNDState initialization with random values which come from standard -RNG. +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. + +Replace all values in array by -A[I] +Array is passed using "shared" convention. 
-- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void hqrndrandomize(hqrndstate* state, ae_state *_state) +void xdebugc1neg(const complex_1d_array &a, const xparams _xparams) { - ae_int_t s0; - ae_int_t s1; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugc1neg(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} - _hqrndstate_clear(state); +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. - s0 = ae_randominteger(hqrnd_hqrndm1, _state); - s1 = ae_randominteger(hqrnd_hqrndm2, _state); - hqrndseed(s0, s1, state, _state); -} +Appends copy of array to itself. +Array is passed using "var" convention. + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +void xdebugc1appendcopy(complex_1d_array &a, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugc1appendcopy(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} /************************************************************************* -HQRNDState initialization with seed values +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. + +Generate N-element array with even-numbered A[K] set to (x,y) = (K*0.25, K*0.125) +and odd-numbered ones are set to 0. + +Array is passed using "out" convention. 
-- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void hqrndseed(ae_int_t s1, - ae_int_t s2, - hqrndstate* state, - ae_state *_state) +void xdebugc1outeven(const ae_int_t n, complex_1d_array &a, const xparams _xparams) { + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugc1outeven(n, const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} - _hqrndstate_clear(state); +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. - - /* - * Protection against negative seeds: - * - * SEED := -(SEED+1) - * - * We can use just "-SEED" because there exists such integer number N - * that N<0, -N=N<0 too. (This number is equal to 0x800...000). Need - * to handle such seed correctly forces us to use a bit complicated - * formula. - */ - if( s1<0 ) +Counts number of True values in the boolean 2D array. + + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +ae_int_t xdebugb2count(const boolean_2d_array &a, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - s1 = -(s1+1); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - if( s2<0 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::xdebugb2count(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. + +Replace all values in array by NOT(a[i]). +Array is passed using "shared" convention. 
+ + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +void xdebugb2not(const boolean_2d_array &a, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - s2 = -(s2+1); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } - state->s1 = s1%(hqrnd_hqrndm1-1)+1; - state->s2 = s2%(hqrnd_hqrndm2-1)+1; - state->magicv = hqrnd_hqrndmagic; + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugb2not(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This function generates random real number in (0,1), -not including interval boundaries +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). +Transposes array. +Array is passed using "var" convention. -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -double hqrnduniformr(hqrndstate* state, ae_state *_state) +void xdebugb2transpose(boolean_2d_array &a, const xparams _xparams) { - double result; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugb2transpose(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. - result = (double)(hqrnd_hqrndintegerbase(state, _state)+1)/(double)(hqrnd_hqrndmax+2); - return result; -} +Generate MxN matrix with elements set to "Sin(3*I+5*J)>0" +Array is passed using "out" convention. 
+ -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +void xdebugb2outsin(const ae_int_t m, const ae_int_t n, boolean_2d_array &a, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugb2outsin(m, n, const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} /************************************************************************* -This function generates random integer number in [0, N) +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. -1. State structure must be initialized with HQRNDRandomize() or HQRNDSeed() -2. N can be any positive number except for very large numbers: - * close to 2^31 on 32-bit systems - * close to 2^62 on 64-bit systems - An exception will be generated if N is too large. +Returns sum of elements in the array. -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -ae_int_t hqrnduniformi(hqrndstate* state, ae_int_t n, ae_state *_state) +ae_int_t xdebugi2sum(const integer_2d_array &a, const xparams _xparams) { - ae_int_t maxcnt; - ae_int_t mx; - ae_int_t a; - ae_int_t b; - ae_int_t result; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::xdebugi2sum(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. +Replace all values in array by -a[i,j] +Array is passed using "shared" convention. - ae_assert(n>0, "HQRNDUniformI: N<=0!", _state); - maxcnt = hqrnd_hqrndmax+1; - - /* - * Two branches: one for N<=MaxCnt, another for N>MaxCnt. - */ - if( n>maxcnt ) + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +void xdebugi2neg(const integer_2d_array &a, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * N>=MaxCnt. - * - * We have two options here: - * a) N is exactly divisible by MaxCnt - * b) N is not divisible by MaxCnt - * - * In both cases we reduce problem on interval spanning [0,N) - * to several subproblems on intervals spanning [0,MaxCnt). 
- */ - if( n%maxcnt==0 ) - { - - /* - * N is exactly divisible by MaxCnt. - * - * [0,N) range is dividided into N/MaxCnt bins, - * each of them having length equal to MaxCnt. - * - * We generate: - * * random bin number B - * * random offset within bin A - * Both random numbers are generated by recursively - * calling HQRNDUniformI(). - * - * Result is equal to A+MaxCnt*B. - */ - ae_assert(n/maxcnt<=maxcnt, "HQRNDUniformI: N is too large", _state); - a = hqrnduniformi(state, maxcnt, _state); - b = hqrnduniformi(state, n/maxcnt, _state); - result = a+maxcnt*b; - } - else - { - - /* - * N is NOT exactly divisible by MaxCnt. - * - * [0,N) range is dividided into Ceil(N/MaxCnt) bins, - * each of them having length equal to MaxCnt. - * - * We generate: - * * random bin number B in [0, Ceil(N/MaxCnt)-1] - * * random offset within bin A - * * if both of what is below is true - * 1) bin number B is that of the last bin - * 2) A >= N mod MaxCnt - * then we repeat generation of A/B. - * This stage is essential in order to avoid bias in the result. - * * otherwise, we return A*MaxCnt+N - */ - ae_assert(n/maxcnt+1<=maxcnt, "HQRNDUniformI: N is too large", _state); - result = -1; - do - { - a = hqrnduniformi(state, maxcnt, _state); - b = hqrnduniformi(state, n/maxcnt+1, _state); - if( b==n/maxcnt&&a>=n%maxcnt ) - { - continue; - } - result = a+maxcnt*b; - } - while(result<0); - } - } - else - { - - /* - * N<=MaxCnt - * - * Code below is a bit complicated because we can not simply - * return "HQRNDIntegerBase() mod N" - it will be skewed for - * large N's in [0.1*HQRNDMax...HQRNDMax]. - */ - mx = maxcnt-maxcnt%n; - do - { - result = hqrnd_hqrndintegerbase(state, _state); - } - while(result>=mx); - result = result%n; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } - return result; + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugi2neg(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -Random number generator: normal numbers - -This function generates one random number from normal distribution. -Its performance is equal to that of HQRNDNormal2() +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). +Transposes array. +Array is passed using "var" convention. 
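The commentary inside hqrnduniformi() above explains why the code cannot simply return "HQRNDIntegerBase() mod N": the incomplete tail bucket of the base generator's range would make small residues slightly more likely. The following standalone sketch (illustrative only, not ALGLIB code; base_rng() is a stand-in for hqrnd_hqrndintegerbase()) shows the same rejection step in isolation.

    // Rejection sampling to remove modulo bias, as in the N<=MaxCnt branch of
    // hqrnduniformi(): values landing in the short tail bucket [mx, maxcnt)
    // are re-drawn instead of folded in, so the final "% n" is exactly uniform.
    #include <cstdint>
    #include <cstdio>
    #include <random>

    static std::mt19937 g_rng(12345);

    // stand-in base generator: uniform integer in [0, maxcnt)
    static std::int64_t base_rng(std::int64_t maxcnt)
    {
        return std::uniform_int_distribution<std::int64_t>(0, maxcnt - 1)(g_rng);
    }

    static std::int64_t uniform_unbiased(std::int64_t n, std::int64_t maxcnt)
    {
        std::int64_t mx = maxcnt - maxcnt % n;   // largest multiple of n <= maxcnt
        std::int64_t r;
        do
        {
            r = base_rng(maxcnt);                // uniform in [0, maxcnt)
        }
        while( r >= mx );                        // reject the short tail bucket
        return r % n;                            // exactly uniform in [0, n)
    }

    int main()
    {
        // 2147483562 == hqrnd_hqrndmax+1, the MaxCnt used by the code above
        std::printf("%lld\n", (long long)uniform_unbiased(10, 2147483562LL));
        return 0;
    }

Since mx is the largest multiple of n not exceeding maxcnt, it always exceeds maxcnt/2, so each draw is accepted with probability above one half and the loop needs fewer than two draws on average.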
-- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -double hqrndnormal(hqrndstate* state, ae_state *_state) +void xdebugi2transpose(integer_2d_array &a, const xparams _xparams) { - double v1; - double v2; - double result; - - - hqrndnormal2(state, &v1, &v2, _state); - result = v1; - return result; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugi2transpose(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -Random number generator: random X and Y such that X^2+Y^2=1 +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). +Generate MxN matrix with elements set to "Sign(Sin(3*I+5*J))" +Array is passed using "out" convention. -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void hqrndunit2(hqrndstate* state, double* x, double* y, ae_state *_state) +void xdebugi2outsin(const ae_int_t m, const ae_int_t n, integer_2d_array &a, const xparams _xparams) { - double v; - double mx; - double mn; - - *x = 0; - *y = 0; - - do + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - hqrndnormal2(state, x, y, _state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } - while(!(ae_fp_neq(*x,(double)(0))||ae_fp_neq(*y,(double)(0)))); - mx = ae_maxreal(ae_fabs(*x, _state), ae_fabs(*y, _state), _state); - mn = ae_minreal(ae_fabs(*x, _state), ae_fabs(*y, _state), _state); - v = mx*ae_sqrt(1+ae_sqr(mn/mx, _state), _state); - *x = *x/v; - *y = *y/v; + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugi2outsin(m, n, const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -Random number generator: normal numbers - -This function generates two independent random numbers from normal -distribution. Its performance is equal to that of HQRNDNormal() +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). +Returns sum of elements in the array. 
-- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void hqrndnormal2(hqrndstate* state, - double* x1, - double* x2, - ae_state *_state) +double xdebugr2sum(const real_2d_array &a, const xparams _xparams) { - double u; - double v; - double s; - - *x1 = 0; - *x2 = 0; - - for(;;) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - u = 2*hqrnduniformr(state, _state)-1; - v = 2*hqrnduniformr(state, _state)-1; - s = ae_sqr(u, _state)+ae_sqr(v, _state); - if( ae_fp_greater(s,(double)(0))&&ae_fp_less(s,(double)(1)) ) - { - - /* - * two Sqrt's instead of one to - * avoid overflow when S is too small - */ - s = ae_sqrt(-2*ae_log(s, _state), _state)/ae_sqrt(s, _state); - *x1 = u*s; - *x2 = v*s; - return; - } +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::xdebugr2sum(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } - /************************************************************************* -Random number generator: exponential distribution +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). +Replace all values in array by -a[i,j] +Array is passed using "shared" convention. -- ALGLIB -- - Copyright 11.08.2007 by Bochkanov Sergey + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -double hqrndexponential(hqrndstate* state, - double lambdav, - ae_state *_state) +void xdebugr2neg(const real_2d_array &a, const xparams _xparams) { - double result; - - - ae_assert(ae_fp_greater(lambdav,(double)(0)), "HQRNDExponential: LambdaV<=0!", _state); - result = -ae_log(hqrnduniformr(state, _state), _state)/lambdav; - return result; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugr2neg(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This function generates random number from discrete distribution given by -finite sample X. - -INPUT PARAMETERS - State - high quality random number generator, must be - initialized with HQRNDRandomize() or HQRNDSeed(). - X - finite sample - N - number of elements to use, N>=1 +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. -RESULT - this function returns one of the X[i] for random i=0..N-1 +Transposes array. +Array is passed using "var" convention. 
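The one-line body of hqrndexponential() shown in this hunk, result = -log(U)/lambda with U = hqrnduniformr(state), is standard inverse-transform sampling. A short check, added for clarity and not taken from the source: for t >= 0,

    P(-\ln U / \lambda \le t) = P(U \ge e^{-\lambda t}) = 1 - e^{-\lambda t},

which is exactly the exponential CDF with rate lambda; hqrnduniformr() excludes the interval endpoints, so ln(U) is always finite.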
-- ALGLIB -- - Copyright 08.11.2011 by Bochkanov Sergey + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -double hqrnddiscrete(hqrndstate* state, - /* Real */ ae_vector* x, - ae_int_t n, - ae_state *_state) +void xdebugr2transpose(real_2d_array &a, const xparams _xparams) { - double result; - - - ae_assert(n>0, "HQRNDDiscrete: N<=0", _state); - ae_assert(n<=x->cnt, "HQRNDDiscrete: Length(X)ptr.p_double[hqrnduniformi(state, n, _state)]; - return result; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugr2transpose(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This function generates random number from continuous distribution given -by finite sample X. +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. -INPUT PARAMETERS - State - high quality random number generator, must be - initialized with HQRNDRandomize() or HQRNDSeed(). - X - finite sample, array[N] (can be larger, in this case only - leading N elements are used). THIS ARRAY MUST BE SORTED BY - ASCENDING. - N - number of elements to use, N>=1 - -RESULT - this function returns random number from continuous distribution which - tries to approximate X as mush as possible. min(X)<=Result<=max(X). +Generate MxN matrix with elements set to "Sin(3*I+5*J)" +Array is passed using "out" convention. 
-- ALGLIB -- - Copyright 08.11.2011 by Bochkanov Sergey + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -double hqrndcontinuous(hqrndstate* state, - /* Real */ ae_vector* x, - ae_int_t n, - ae_state *_state) +void xdebugr2outsin(const ae_int_t m, const ae_int_t n, real_2d_array &a, const xparams _xparams) { - double mx; - double mn; - ae_int_t i; - double result; - - - ae_assert(n>0, "HQRNDContinuous: N<=0", _state); - ae_assert(n<=x->cnt, "HQRNDContinuous: Length(X)ptr.p_double[0]; - return result; - } - i = hqrnduniformi(state, n-1, _state); - mn = x->ptr.p_double[i]; - mx = x->ptr.p_double[i+1]; - ae_assert(ae_fp_greater_eq(mx,mn), "HQRNDDiscrete: X is not sorted by ascending", _state); - if( ae_fp_neq(mx,mn) ) - { - result = (mx-mn)*hqrnduniformr(state, _state)+mn; - } - else + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - result = mn; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } - return result; + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugr2outsin(m, n, const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This function returns random integer in [0,HQRNDMax] +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. -L'Ecuyer, Efficient and portable combined random number generators +Returns sum of elements in the array. + + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -static ae_int_t hqrnd_hqrndintegerbase(hqrndstate* state, - ae_state *_state) +alglib::complex xdebugc2sum(const complex_2d_array &a, const xparams _xparams) { - ae_int_t k; - ae_int_t result; - - - ae_assert(state->magicv==hqrnd_hqrndmagic, "HQRNDIntegerBase: State is not correctly initialized!", _state); - k = state->s1/53668; - state->s1 = 40014*(state->s1-k*53668)-k*12211; - if( state->s1<0 ) - { - state->s1 = state->s1+2147483563; - } - k = state->s2/52774; - state->s2 = 40692*(state->s2-k*52774)-k*3791; - if( state->s2<0 ) - { - state->s2 = state->s2+2147483399; - } - - /* - * Result - */ - result = state->s1-state->s2; - if( result<1 ) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - result = result+2147483562; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - result = result-1; - return result; + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_complex result = alglib_impl::xdebugc2sum(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. 
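hqrnddiscrete() and hqrndcontinuous(), whose C++ wrappers and computational cores both appear in this hunk, sample from a finite data set either by returning one of its elements or by drawing from a piecewise-uniform approximation of it (the sample must be sorted ascending for the continuous variant). A hypothetical usage sketch follows; the header name and alglib::xdefault are assumptions, while the wrapper signatures are copied from the hunk above.

    // Hypothetical usage sketch for the hqrnddiscrete()/hqrndcontinuous()
    // wrappers defined earlier in this hunk (not part of the patch).
    #include <cstdio>
    #include "alglibmisc.h"   // assumed public header for the hqrnd subpackage

    int main()
    {
        alglib::hqrndstate state;
        alglib::hqrndseed(11, 42, state, alglib::xdefault);   // reproducible seed

        alglib::real_1d_array sample = "[0.0, 1.0, 2.5, 7.0]"; // sorted ascending

        // one of the sample values, each chosen with probability 1/4
        double d = alglib::hqrnddiscrete(state, sample, sample.length(), alglib::xdefault);

        // a value in [0.0, 7.0] drawn from a piecewise-uniform approximation
        double c = alglib::hqrndcontinuous(state, sample, sample.length(), alglib::xdefault);

        std::printf("discrete=%f  continuous=%f\n", d, c);
        return 0;
    }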
+ +Replace all values in array by -a[i,j] +Array is passed using "shared" convention. -void _hqrndstate_init(void* _p, ae_state *_state) + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +void xdebugc2neg(const complex_2d_array &a, const xparams _xparams) { - hqrndstate *p = (hqrndstate*)_p; - ae_touch_ptr((void*)p); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugc2neg(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. + +Transposes array. +Array is passed using "var" convention. -void _hqrndstate_init_copy(void* _dst, void* _src, ae_state *_state) + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +void xdebugc2transpose(complex_2d_array &a, const xparams _xparams) { - hqrndstate *dst = (hqrndstate*)_dst; - hqrndstate *src = (hqrndstate*)_src; - dst->s1 = src->s1; - dst->s2 = src->s2; - dst->magicv = src->magicv; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugc2transpose(const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. -void _hqrndstate_clear(void* _p) +Generate MxN matrix with elements set to "Sin(3*I+5*J),Cos(3*I+5*J)" +Array is passed using "out" convention. 
+ + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +void xdebugc2outsincos(const ae_int_t m, const ae_int_t n, complex_2d_array &a, const xparams _xparams) { - hqrndstate *p = (hqrndstate*)_p; - ae_touch_ptr((void*)p); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::xdebugc2outsincos(m, n, const_cast(a.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +This is debug function intended for testing ALGLIB interface generator. +Never use it in any real life project. + +Returns sum of a[i,j]*(1+b[i,j]) such that c[i,j] is True -void _hqrndstate_destroy(void* _p) + -- ALGLIB -- + Copyright 11.10.2013 by Bochkanov Sergey +*************************************************************************/ +double xdebugmaskedbiasedproductsum(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const real_2d_array &b, const boolean_2d_array &c, const xparams _xparams) { - hqrndstate *p = (hqrndstate*)_p; - ae_touch_ptr((void*)p); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::xdebugmaskedbiasedproductsum(m, n, const_cast(a.c_ptr()), const_cast(b.c_ptr()), const_cast(c.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} +#endif } +///////////////////////////////////////////////////////////////////////// +// +// THIS SECTION CONTAINS IMPLEMENTATION OF COMPUTATIONAL CORE +// +///////////////////////////////////////////////////////////////////////// +namespace alglib_impl +{ +#if defined(AE_COMPILE_NEARESTNEIGHBOR) || !defined(AE_PARTIAL_BUILD) +static ae_int_t nearestneighbor_splitnodesize = 6; +static ae_int_t nearestneighbor_kdtreefirstversion = 0; +static ae_int_t nearestneighbor_tsqueryrnn(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* x, + double r, + ae_bool selfmatch, + ae_bool orderedbydist, + ae_state *_state); +static void nearestneighbor_kdtreesplit(kdtree* kdt, + ae_int_t i1, + ae_int_t i2, + ae_int_t d, + double s, + ae_int_t* i3, + ae_state *_state); +static void nearestneighbor_kdtreegeneratetreerec(kdtree* kdt, + ae_int_t* nodesoffs, + ae_int_t* splitsoffs, + ae_int_t i1, + ae_int_t i2, + ae_int_t maxleafsize, + ae_state *_state); +static void nearestneighbor_kdtreequerynnrec(kdtree* kdt, + kdtreerequestbuffer* buf, + ae_int_t offs, + ae_state *_state); +static void nearestneighbor_kdtreequeryboxrec(kdtree* kdt, + kdtreerequestbuffer* buf, + ae_int_t offs, + ae_state *_state); +static void nearestneighbor_kdtreeinitbox(kdtree* kdt, 
+ /* Real */ ae_vector* x, + kdtreerequestbuffer* buf, + ae_state *_state); +static void nearestneighbor_kdtreeallocdatasetindependent(kdtree* kdt, + ae_int_t nx, + ae_int_t ny, + ae_state *_state); +static void nearestneighbor_kdtreeallocdatasetdependent(kdtree* kdt, + ae_int_t n, + ae_int_t nx, + ae_int_t ny, + ae_state *_state); +static void nearestneighbor_checkrequestbufferconsistency(kdtree* kdt, + kdtreerequestbuffer* buf, + ae_state *_state); + + +#endif +#if defined(AE_COMPILE_HQRND) || !defined(AE_PARTIAL_BUILD) +static ae_int_t hqrnd_hqrndmax = 2147483561; +static ae_int_t hqrnd_hqrndm1 = 2147483563; +static ae_int_t hqrnd_hqrndm2 = 2147483399; +static ae_int_t hqrnd_hqrndmagic = 1634357784; +static ae_int_t hqrnd_hqrndintegerbase(hqrndstate* state, + ae_state *_state); + + +#endif +#if defined(AE_COMPILE_XDEBUG) || !defined(AE_PARTIAL_BUILD) + + +#endif +#if defined(AE_COMPILE_NEARESTNEIGHBOR) || !defined(AE_PARTIAL_BUILD) /************************************************************************* @@ -2835,8 +4254,9 @@ ae_int_t i; ae_frame_make(_state, &_frame_block); + memset(&tags, 0, sizeof(tags)); _kdtree_clear(kdt); - ae_vector_init(&tags, 0, DT_INT, _state); + ae_vector_init(&tags, 0, DT_INT, _state, ae_true); ae_assert(n>=0, "KDTreeBuild: N<0", _state); ae_assert(nx>=1, "KDTreeBuild: NX<1", _state); @@ -2906,7 +4326,6 @@ { ae_int_t i; ae_int_t j; - ae_int_t maxnodes; ae_int_t nodesoffs; ae_int_t splitsoffs; @@ -2927,7 +4346,7 @@ kdt->nx = nx; kdt->ny = ny; kdt->normtype = normtype; - kdt->kcur = 0; + kdt->innerbuf.kcur = 0; /* * N=0 => quick exit @@ -2942,6 +4361,7 @@ */ nearestneighbor_kdtreeallocdatasetindependent(kdt, nx, ny, _state); nearestneighbor_kdtreeallocdatasetdependent(kdt, n, nx, ny, _state); + kdtreecreaterequestbuffer(kdt, &kdt->innerbuf, _state); /* * Initial fill @@ -2968,24 +4388,73 @@ } /* - * prepare tree structure - * * MaxNodes=N because we guarantee no trivial splits, i.e. - * every split will generate two non-empty boxes - */ - maxnodes = n; - ae_vector_set_length(&kdt->nodes, nearestneighbor_splitnodesize*2*maxnodes, _state); - ae_vector_set_length(&kdt->splits, 2*maxnodes, _state); + * Generate tree + */ nodesoffs = 0; splitsoffs = 0; - ae_v_move(&kdt->curboxmin.ptr.p_double[0], 1, &kdt->boxmin.ptr.p_double[0], 1, ae_v_len(0,nx-1)); - ae_v_move(&kdt->curboxmax.ptr.p_double[0], 1, &kdt->boxmax.ptr.p_double[0], 1, ae_v_len(0,nx-1)); + ae_v_move(&kdt->innerbuf.curboxmin.ptr.p_double[0], 1, &kdt->boxmin.ptr.p_double[0], 1, ae_v_len(0,nx-1)); + ae_v_move(&kdt->innerbuf.curboxmax.ptr.p_double[0], 1, &kdt->boxmax.ptr.p_double[0], 1, ae_v_len(0,nx-1)); nearestneighbor_kdtreegeneratetreerec(kdt, &nodesoffs, &splitsoffs, 0, n, 8, _state); + ivectorresize(&kdt->nodes, nodesoffs, _state); + rvectorresize(&kdt->splits, splitsoffs, _state); +} + + +/************************************************************************* +This function creates buffer structure which can be used to perform +parallel KD-tree requests. + +KD-tree subpackage provides two sets of request functions - ones which use +internal buffer of KD-tree object (these functions are single-threaded +because they use same buffer, which can not shared between threads), and +ones which use external buffer. + +This function is used to initialize external buffer. + +INPUT PARAMETERS + KDT - KD-tree which is associated with newly created buffer + +OUTPUT PARAMETERS + Buf - external buffer. + + +IMPORTANT: KD-tree buffer should be used only with KD-tree object which + was used to initialize buffer. 
Any attempt to use buffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. + + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +void kdtreecreaterequestbuffer(kdtree* kdt, + kdtreerequestbuffer* buf, + ae_state *_state) +{ + + _kdtreerequestbuffer_clear(buf); + + ae_vector_set_length(&buf->x, kdt->nx, _state); + ae_vector_set_length(&buf->boxmin, kdt->nx, _state); + ae_vector_set_length(&buf->boxmax, kdt->nx, _state); + ae_vector_set_length(&buf->idx, kdt->n, _state); + ae_vector_set_length(&buf->r, kdt->n, _state); + ae_vector_set_length(&buf->buf, ae_maxint(kdt->n, kdt->nx, _state), _state); + ae_vector_set_length(&buf->curboxmin, kdt->nx, _state); + ae_vector_set_length(&buf->curboxmax, kdt->nx, _state); + buf->kcur = 0; } /************************************************************************* K-NN query: K nearest neighbors +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryKNN() ("Ts" stands for "thread-safe"). + INPUT PARAMETERS KDT - KD-tree X - point, array[0..NX-1]. @@ -3023,18 +4492,25 @@ ae_assert(k>=1, "KDTreeQueryKNN: K<1!", _state); ae_assert(x->cnt>=kdt->nx, "KDTreeQueryKNN: Length(X)nx, _state), "KDTreeQueryKNN: X contains infinite or NaN values!", _state); - result = kdtreequeryaknn(kdt, x, k, selfmatch, 0.0, _state); + result = kdtreetsqueryaknn(kdt, &kdt->innerbuf, x, k, selfmatch, 0.0, _state); return result; } /************************************************************************* -R-NN query: all points within R-sphere centered at X +K-NN query: K nearest neighbors, using external thread-local buffer. + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. INPUT PARAMETERS - KDT - KD-tree + KDT - kd-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. X - point, array[0..NX-1]. - R - radius of sphere (in corresponding norm), R>0 + K - number of neighbors to return, K>=1 SelfMatch - whether self-matches are allowed: * if True, nearest neighbor may be the point itself (if it exists in original dataset) @@ -3043,83 +4519,277 @@ * if not given, considered True RESULT - number of neighbors found, >=0 + number of actual neighbors found (either K or N, if K>N). This subroutine performs query and stores its result in the internal -structures of the KD-tree. You can use following subroutines to obtain -actual results: -* KDTreeQueryResultsX() to get X-values -* KDTreeQueryResultsXY() to get X- and Y-values -* KDTreeQueryResultsTags() to get tag values -* KDTreeQueryResultsDistances() to get distances +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. 
Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 18.03.2016 by Bochkanov Sergey *************************************************************************/ -ae_int_t kdtreequeryrnn(kdtree* kdt, +ae_int_t kdtreetsqueryknn(kdtree* kdt, + kdtreerequestbuffer* buf, /* Real */ ae_vector* x, - double r, + ae_int_t k, ae_bool selfmatch, ae_state *_state) { - ae_int_t i; - ae_int_t j; ae_int_t result; - ae_assert(ae_fp_greater(r,(double)(0)), "KDTreeQueryRNN: incorrect R!", _state); - ae_assert(x->cnt>=kdt->nx, "KDTreeQueryRNN: Length(X)nx, _state), "KDTreeQueryRNN: X contains infinite or NaN values!", _state); - - /* - * Handle special case: KDT.N=0 - */ - if( kdt->n==0 ) - { - kdt->kcur = 0; - result = 0; - return result; - } - - /* - * Prepare parameters - */ - kdt->kneeded = 0; - if( kdt->normtype!=2 ) - { - kdt->rneeded = r; - } - else - { - kdt->rneeded = ae_sqr(r, _state); - } - kdt->selfmatch = selfmatch; - kdt->approxf = (double)(1); - kdt->kcur = 0; - - /* - * calculate distance from point to current bounding box - */ - nearestneighbor_kdtreeinitbox(kdt, x, _state); + ae_assert(k>=1, "KDTreeTsQueryKNN: K<1!", _state); + ae_assert(x->cnt>=kdt->nx, "KDTreeTsQueryKNN: Length(X)nx, _state), "KDTreeTsQueryKNN: X contains infinite or NaN values!", _state); + result = kdtreetsqueryaknn(kdt, buf, x, k, selfmatch, 0.0, _state); + return result; +} + + +/************************************************************************* +R-NN query: all points within R-sphere centered at X, ordered by distance +between point and X (by ascending). + +NOTE: it is also possible to perform undordered queries performed by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. Such queries + are faster because we do not have to use heap structure for sorting. + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: kdtreetsqueryrnn() ("Ts" stands for "thread-safe"). + +INPUT PARAMETERS + KDT - KD-tree + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. 
You can use following subroutines to obtain +actual results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreequeryrnn(kdtree* kdt, + /* Real */ ae_vector* x, + double r, + ae_bool selfmatch, + ae_state *_state) +{ + ae_int_t result; + + + ae_assert(ae_fp_greater(r,(double)(0)), "KDTreeQueryRNN: incorrect R!", _state); + ae_assert(x->cnt>=kdt->nx, "KDTreeQueryRNN: Length(X)nx, _state), "KDTreeQueryRNN: X contains infinite or NaN values!", _state); + result = kdtreetsqueryrnn(kdt, &kdt->innerbuf, x, r, selfmatch, _state); + return result; +} + + +/************************************************************************* +R-NN query: all points within R-sphere centered at X, no ordering by +distance as undicated by "U" suffix (faster that ordered query, for large +queries - significantly faster). + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: kdtreetsqueryrnn() ("Ts" stands for "thread-safe"). + +INPUT PARAMETERS + KDT - KD-tree + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +actual results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances + +As indicated by "U" suffix, this function returns unordered results. + + -- ALGLIB -- + Copyright 01.11.2018 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreequeryrnnu(kdtree* kdt, + /* Real */ ae_vector* x, + double r, + ae_bool selfmatch, + ae_state *_state) +{ + ae_int_t result; + + + ae_assert(ae_fp_greater(r,(double)(0)), "KDTreeQueryRNNU: incorrect R!", _state); + ae_assert(x->cnt>=kdt->nx, "KDTreeQueryRNNU: Length(X)nx, _state), "KDTreeQueryRNNU: X contains infinite or NaN values!", _state); + result = kdtreetsqueryrnnu(kdt, &kdt->innerbuf, x, r, selfmatch, _state); + return result; +} + + +/************************************************************************* +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, sorted by distance between point and X (by ascending) + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +NOTE: it is also possible to perform undordered queries performed by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. Such queries + are faster because we do not have to use heap structure for sorting. 
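A minimal usage sketch of the ordered versus unordered R-NN queries described above, written against the public alglib:: C++ wrappers declared in alglibmisc.h (the sample data and the exact overload forms are illustrative assumptions, not taken from this patch):

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    // Hypothetical sample data: five 2-D points, no Y-values, Euclidean (2-norm) tree
    alglib::real_2d_array pts = "[[0,0],[0,1],[1,0],[1,1],[0.1,0.1]]";
    alglib::kdtree kdt;
    alglib::kdtreebuild(pts, 2, 0, 2, kdt);

    alglib::real_1d_array x = "[0,0]";

    // Ordered R-NN query: neighbors come back sorted by distance from x
    alglib::ae_int_t k1 = alglib::kdtreequeryrnn(kdt, x, 0.5, true);
    alglib::real_1d_array d;
    alglib::kdtreequeryresultsdistances(kdt, d);

    // Unordered query ("U" suffix): same point set, but the heap-sort step is skipped
    alglib::ae_int_t k2 = alglib::kdtreequeryrnnu(kdt, x, 0.5, true);

    printf("ordered: %d neighbors, unordered: %d neighbors\n", (int)k1, (int)k2);
    return 0;
}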
+ +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances - /* - * call recursive search - * results are returned as heap - */ - nearestneighbor_kdtreequerynnrec(kdt, 0, _state); +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. + + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreetsqueryrnn(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* x, + double r, + ae_bool selfmatch, + ae_state *_state) +{ + ae_int_t result; + + + ae_assert(ae_isfinite(r, _state)&&ae_fp_greater(r,(double)(0)), "KDTreeTsQueryRNN: incorrect R!", _state); + ae_assert(x->cnt>=kdt->nx, "KDTreeTsQueryRNN: Length(X)nx, _state), "KDTreeTsQueryRNN: X contains infinite or NaN values!", _state); + result = nearestneighbor_tsqueryrnn(kdt, buf, x, r, selfmatch, ae_true, _state); + return result; +} + + +/************************************************************************* +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, no ordering by distance as undicated by "U" suffix +(faster that ordered query, for large queries - significantly faster). + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +As indicated by "U" suffix, this function returns unordered results. 
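The thread-safe pattern described above amounts to: build the kd-tree once, give every thread its own kdtreerequestbuffer, and call only the "Ts" query and result functions with that buffer. A hedged sketch, again assuming the standard alglib:: C++ wrappers (the threading scaffolding and data are illustrative):

#include <cstdio>
#include <thread>
#include "alglibmisc.h"

int main()
{
    // Shared kd-tree, built once from hypothetical data
    alglib::real_2d_array pts = "[[0,0],[0,1],[1,0],[1,1]]";
    alglib::kdtree kdt;
    alglib::kdtreebuild(pts, 2, 0, 2, kdt);

    auto worker = [&kdt](const char *query)
    {
        // Each thread owns its request buffer; only the buffer is mutated by queries
        alglib::kdtreerequestbuffer buf;
        alglib::kdtreecreaterequestbuffer(kdt, buf);

        alglib::real_1d_array x(query);
        alglib::ae_int_t n = alglib::kdtreetsqueryrnn(kdt, buf, x, 0.8, true);

        // Results are read back from the same buffer via the "ts" readers
        alglib::real_2d_array nn;
        alglib::kdtreetsqueryresultsx(kdt, buf, nn);
        printf("query %s: %d neighbors\n", query, (int)n);
    };

    std::thread t1(worker, "[0.1,0.1]"), t2(worker, "[0.9,0.9]");
    t1.join();
    t2.join();
    return 0;
}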
- /* - * pop from heap to generate ordered representation - * - * last element is not pop'ed because it is already in - * its place - */ - result = kdt->kcur; - j = kdt->kcur; - for(i=kdt->kcur; i>=2; i--) - { - tagheappopi(&kdt->r, &kdt->idx, &j, _state); - } +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. + + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreetsqueryrnnu(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* x, + double r, + ae_bool selfmatch, + ae_state *_state) +{ + ae_int_t result; + + + ae_assert(ae_isfinite(r, _state)&&ae_fp_greater(r,(double)(0)), "KDTreeTsQueryRNNU: incorrect R!", _state); + ae_assert(x->cnt>=kdt->nx, "KDTreeTsQueryRNNU: Length(X)nx, _state), "KDTreeTsQueryRNNU: X contains infinite or NaN values!", _state); + result = nearestneighbor_tsqueryrnn(kdt, buf, x, r, selfmatch, ae_false, _state); return result; } @@ -3127,6 +4797,12 @@ /************************************************************************* K-NN query: approximate K nearest neighbors +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryAKNN() ("Ts" stands for "thread-safe"). + INPUT PARAMETERS KDT - KD-tree X - point, array[0..NX-1]. @@ -3166,53 +4842,122 @@ double eps, ae_state *_state) { + ae_int_t result; + + + result = kdtreetsqueryaknn(kdt, &kdt->innerbuf, x, k, selfmatch, eps, _state); + return result; +} + + +/************************************************************************* +K-NN query: approximate K nearest neighbors, using thread-local buffer. + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + K - number of neighbors to return, K>=1 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + Eps - approximation factor, Eps>=0. eps-approximate nearest + neighbor is a neighbor whose distance from X is at + most (1+eps) times distance of true nearest neighbor. + +RESULT + number of actual neighbors found (either K or N, if K>N). + +NOTES + significant performance gain may be achieved only when Eps is is on + the order of magnitude of 1 or larger. + +This subroutine performs query and stores its result in the internal +structures of the buffer object. 
You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. + + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreetsqueryaknn(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* x, + ae_int_t k, + ae_bool selfmatch, + double eps, + ae_state *_state) +{ ae_int_t i; ae_int_t j; ae_int_t result; - ae_assert(k>0, "KDTreeQueryAKNN: incorrect K!", _state); - ae_assert(ae_fp_greater_eq(eps,(double)(0)), "KDTreeQueryAKNN: incorrect Eps!", _state); - ae_assert(x->cnt>=kdt->nx, "KDTreeQueryAKNN: Length(X)nx, _state), "KDTreeQueryAKNN: X contains infinite or NaN values!", _state); + ae_assert(k>0, "KDTreeTsQueryAKNN: incorrect K!", _state); + ae_assert(ae_fp_greater_eq(eps,(double)(0)), "KDTreeTsQueryAKNN: incorrect Eps!", _state); + ae_assert(x->cnt>=kdt->nx, "KDTreeTsQueryAKNN: Length(X)nx, _state), "KDTreeTsQueryAKNN: X contains infinite or NaN values!", _state); /* * Handle special case: KDT.N=0 */ if( kdt->n==0 ) { - kdt->kcur = 0; + buf->kcur = 0; result = 0; return result; } /* + * Check consistency of request buffer + */ + nearestneighbor_checkrequestbufferconsistency(kdt, buf, _state); + + /* * Prepare parameters */ k = ae_minint(k, kdt->n, _state); - kdt->kneeded = k; - kdt->rneeded = (double)(0); - kdt->selfmatch = selfmatch; + buf->kneeded = k; + buf->rneeded = (double)(0); + buf->selfmatch = selfmatch; if( kdt->normtype==2 ) { - kdt->approxf = 1/ae_sqr(1+eps, _state); + buf->approxf = 1/ae_sqr(1+eps, _state); } else { - kdt->approxf = 1/(1+eps); + buf->approxf = 1/(1+eps); } - kdt->kcur = 0; + buf->kcur = 0; /* * calculate distance from point to current bounding box */ - nearestneighbor_kdtreeinitbox(kdt, x, _state); + nearestneighbor_kdtreeinitbox(kdt, x, buf, _state); /* * call recursive search * results are returned as heap */ - nearestneighbor_kdtreequerynnrec(kdt, 0, _state); + nearestneighbor_kdtreequerynnrec(kdt, buf, 0, _state); /* * pop from heap to generate ordered representation @@ -3220,41 +4965,187 @@ * last element is non pop'ed because it is already in * its place */ - result = kdt->kcur; - j = kdt->kcur; - for(i=kdt->kcur; i>=2; i--) + result = buf->kcur; + j = buf->kcur; + for(i=buf->kcur; i>=2; i--) { - tagheappopi(&kdt->r, &kdt->idx, &j, _state); + tagheappopi(&buf->r, &buf->idx, &j, _state); } return result; } /************************************************************************* -X-values from last query +Box query: all points within user-specified box. + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryBox() ("Ts" stands for "thread-safe"). INPUT PARAMETERS - KDT - KD-tree - X - possibly pre-allocated buffer. If X is too small to store - result, it is resized. 
If size(X) is enough to store - result, it is left unchanged. + KDT - KD-tree + BoxMin - lower bounds, array[0..NX-1]. + BoxMax - upper bounds, array[0..NX-1]. -OUTPUT PARAMETERS - X - rows are filled with X-values -NOTES -1. points are ordered by distance from the query point (first = closest) -2. if XY is larger than required to store result, only leading part will - be overwritten; trailing part will be left unchanged. So if on input - XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get - XY = [[1,2],[C,D]]. This is done purposely to increase performance; if - you want function to resize array according to result size, use - function with same name and suffix 'I'. +RESULT + number of actual neighbors found (in [0,N]). -SEE ALSO -* KDTreeQueryResultsXY() X- and Y-values -* KDTreeQueryResultsTags() tag values -* KDTreeQueryResultsDistances() distances +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +these results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() returns zeros for this request + +NOTE: this particular query returns unordered results, because there is no + meaningful way of ordering points. Furthermore, no 'distance' is + associated with points - it is either INSIDE or OUTSIDE (so request + for distances will return zeros). + + -- ALGLIB -- + Copyright 14.05.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreequerybox(kdtree* kdt, + /* Real */ ae_vector* boxmin, + /* Real */ ae_vector* boxmax, + ae_state *_state) +{ + ae_int_t result; + + + result = kdtreetsquerybox(kdt, &kdt->innerbuf, boxmin, boxmax, _state); + return result; +} + + +/************************************************************************* +Box query: all points within user-specified box, using thread-local buffer. + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + BoxMin - lower bounds, array[0..NX-1]. + BoxMax - upper bounds, array[0..NX-1]. + +RESULT + number of actual neighbors found (in [0,N]). + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "ts" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() returns zeros for this query + +NOTE: this particular query returns unordered results, because there is no + meaningful way of ordering points. Furthermore, no 'distance' is + associated with points - it is either INSIDE or OUTSIDE (so request + for distances will return zeros). + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. 
+ + -- ALGLIB -- + Copyright 14.05.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreetsquerybox(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* boxmin, + /* Real */ ae_vector* boxmax, + ae_state *_state) +{ + ae_int_t j; + ae_int_t result; + + + ae_assert(boxmin->cnt>=kdt->nx, "KDTreeTsQueryBox: Length(BoxMin)cnt>=kdt->nx, "KDTreeTsQueryBox: Length(BoxMax)nx, _state), "KDTreeTsQueryBox: BoxMin contains infinite or NaN values!", _state); + ae_assert(isfinitevector(boxmax, kdt->nx, _state), "KDTreeTsQueryBox: BoxMax contains infinite or NaN values!", _state); + + /* + * Check consistency of request buffer + */ + nearestneighbor_checkrequestbufferconsistency(kdt, buf, _state); + + /* + * Quick exit for degenerate boxes + */ + for(j=0; j<=kdt->nx-1; j++) + { + if( ae_fp_greater(boxmin->ptr.p_double[j],boxmax->ptr.p_double[j]) ) + { + buf->kcur = 0; + result = 0; + return result; + } + } + + /* + * Prepare parameters + */ + for(j=0; j<=kdt->nx-1; j++) + { + buf->boxmin.ptr.p_double[j] = boxmin->ptr.p_double[j]; + buf->boxmax.ptr.p_double[j] = boxmax->ptr.p_double[j]; + buf->curboxmin.ptr.p_double[j] = boxmin->ptr.p_double[j]; + buf->curboxmax.ptr.p_double[j] = boxmax->ptr.p_double[j]; + } + buf->kcur = 0; + + /* + * call recursive search + */ + nearestneighbor_kdtreequeryboxrec(kdt, buf, 0, _state); + result = buf->kcur; + return result; +} + + +/************************************************************************* +X-values from last query. + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsx(). + +INPUT PARAMETERS + KDT - KD-tree + X - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + X - rows are filled with X-values + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey @@ -3263,31 +5154,209 @@ /* Real */ ae_matrix* x, ae_state *_state) { + + + kdtreetsqueryresultsx(kdt, &kdt->innerbuf, x, _state); +} + + +/************************************************************************* +X- and Y-values from last query + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsxy(). + +INPUT PARAMETERS + KDT - KD-tree + XY - possibly pre-allocated buffer. If XY is too small to store + result, it is resized. If size(XY) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + XY - rows are filled with points: first NX columns with + X-values, next NY columns - with Y-values. + +NOTES +1. 
points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +void kdtreequeryresultsxy(kdtree* kdt, + /* Real */ ae_matrix* xy, + ae_state *_state) +{ + + + kdtreetsqueryresultsxy(kdt, &kdt->innerbuf, xy, _state); +} + + +/************************************************************************* +Tags from last query + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultstags(). + +INPUT PARAMETERS + KDT - KD-tree + Tags - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + Tags - filled with tags associated with points, + or, when no tags were supplied, with zeros + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsDistances() distances + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +void kdtreequeryresultstags(kdtree* kdt, + /* Integer */ ae_vector* tags, + ae_state *_state) +{ + + + kdtreetsqueryresultstags(kdt, &kdt->innerbuf, tags, _state); +} + + +/************************************************************************* +Distances from last query + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsdistances(). + +INPUT PARAMETERS + KDT - KD-tree + R - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + R - filled with distances (in corresponding norm) + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. 
This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +void kdtreequeryresultsdistances(kdtree* kdt, + /* Real */ ae_vector* r, + ae_state *_state) +{ + + + kdtreetsqueryresultsdistances(kdt, &kdt->innerbuf, r, _state); +} + + +/************************************************************************* +X-values from last query associated with kdtreerequestbuffer object. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. + X - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + X - rows are filled with X-values + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +void kdtreetsqueryresultsx(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_matrix* x, + ae_state *_state) +{ ae_int_t i; ae_int_t k; - if( kdt->kcur==0 ) + if( buf->kcur==0 ) { return; } - if( x->rowskcur||x->colsnx ) + if( x->rowskcur||x->colsnx ) { - ae_matrix_set_length(x, kdt->kcur, kdt->nx, _state); + ae_matrix_set_length(x, buf->kcur, kdt->nx, _state); } - k = kdt->kcur; + k = buf->kcur; for(i=0; i<=k-1; i++) { - ae_v_move(&x->ptr.pp_double[i][0], 1, &kdt->xy.ptr.pp_double[kdt->idx.ptr.p_int[i]][kdt->nx], 1, ae_v_len(0,kdt->nx-1)); + ae_v_move(&x->ptr.pp_double[i][0], 1, &kdt->xy.ptr.pp_double[buf->idx.ptr.p_int[i]][kdt->nx], 1, ae_v_len(0,kdt->nx-1)); } } /************************************************************************* -X- and Y-values from last query +X- and Y-values from last query associated with kdtreerequestbuffer object. INPUT PARAMETERS KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. XY - possibly pre-allocated buffer. If XY is too small to store result, it is resized. If size(XY) is enough to store result, it is left unchanged. 
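The buffered result readers must be handed the same kdtreerequestbuffer the query was issued with. A sketch assuming the alglib:: C++ wrappers and an illustrative dataset with NY=1 attached Y-values:

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    // Points with one Y-value (a label) attached: NX=2, NY=1
    alglib::real_2d_array pts = "[[0,0,10],[0,1,11],[1,0,12],[1,1,13]]";
    alglib::kdtree kdt;
    alglib::kdtreebuild(pts, 2, 1, 2, kdt);

    alglib::kdtreerequestbuffer buf;
    alglib::kdtreecreaterequestbuffer(kdt, buf);

    alglib::real_1d_array x = "[0.2,0.2]";
    alglib::ae_int_t n = alglib::kdtreetsqueryknn(kdt, buf, x, 2, true);

    // Results live in buf, so the "ts" readers are given the same buffer
    alglib::real_2d_array xy;
    alglib::real_1d_array  dist;
    alglib::kdtreetsqueryresultsxy(kdt, buf, xy);          // NX+NY columns per row
    alglib::kdtreetsqueryresultsdistances(kdt, buf, dist);
    printf("%d neighbors, nearest label=%g at distance %g\n",
           (int)n, xy[0][2], dist[0]);
    return 0;
}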
@@ -3313,7 +5382,8 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsxy(kdtree* kdt, +void kdtreetsqueryresultsxy(kdtree* kdt, + kdtreerequestbuffer* buf, /* Real */ ae_matrix* xy, ae_state *_state) { @@ -3321,27 +5391,34 @@ ae_int_t k; - if( kdt->kcur==0 ) + if( buf->kcur==0 ) { return; } - if( xy->rowskcur||xy->colsnx+kdt->ny ) + if( xy->rowskcur||xy->colsnx+kdt->ny ) { - ae_matrix_set_length(xy, kdt->kcur, kdt->nx+kdt->ny, _state); + ae_matrix_set_length(xy, buf->kcur, kdt->nx+kdt->ny, _state); } - k = kdt->kcur; + k = buf->kcur; for(i=0; i<=k-1; i++) { - ae_v_move(&xy->ptr.pp_double[i][0], 1, &kdt->xy.ptr.pp_double[kdt->idx.ptr.p_int[i]][kdt->nx], 1, ae_v_len(0,kdt->nx+kdt->ny-1)); + ae_v_move(&xy->ptr.pp_double[i][0], 1, &kdt->xy.ptr.pp_double[buf->idx.ptr.p_int[i]][kdt->nx], 1, ae_v_len(0,kdt->nx+kdt->ny-1)); } } /************************************************************************* -Tags from last query +Tags from last query associated with kdtreerequestbuffer object. + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - KDTreeTsqueryresultstags(). INPUT PARAMETERS KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. Tags - possibly pre-allocated buffer. If X is too small to store result, it is resized. If size(X) is enough to store result, it is left unchanged. @@ -3367,7 +5444,8 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultstags(kdtree* kdt, +void kdtreetsqueryresultstags(kdtree* kdt, + kdtreerequestbuffer* buf, /* Integer */ ae_vector* tags, ae_state *_state) { @@ -3375,27 +5453,34 @@ ae_int_t k; - if( kdt->kcur==0 ) + if( buf->kcur==0 ) { return; } - if( tags->cntkcur ) + if( tags->cntkcur ) { - ae_vector_set_length(tags, kdt->kcur, _state); + ae_vector_set_length(tags, buf->kcur, _state); } - k = kdt->kcur; + k = buf->kcur; for(i=0; i<=k-1; i++) { - tags->ptr.p_int[i] = kdt->tags.ptr.p_int[kdt->idx.ptr.p_int[i]]; + tags->ptr.p_int[i] = kdt->tags.ptr.p_int[buf->idx.ptr.p_int[i]]; } } /************************************************************************* -Distances from last query +Distances from last query associated with kdtreerequestbuffer object. + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - KDTreeTsqueryresultsdistances(). INPUT PARAMETERS KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. R - possibly pre-allocated buffer. If X is too small to store result, it is resized. If size(X) is enough to store result, it is left unchanged. 
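Tags behave the same way: they come back in result order through the buffer used for the query. A sketch assuming kdtreebuildtagged() and the alglib:: C++ wrappers, with illustrative integer ids:

#include <cstdio>
#include "alglibmisc.h"

int main()
{
    // Hypothetical dataset where each point carries an integer tag (e.g. a row id)
    alglib::real_2d_array   pts = "[[0,0],[0,1],[1,0],[1,1]]";
    alglib::integer_1d_array ids = "[100,101,102,103]";
    alglib::kdtree kdt;
    alglib::kdtreebuildtagged(pts, ids, 2, 0, 2, kdt);

    alglib::kdtreerequestbuffer buf;
    alglib::kdtreecreaterequestbuffer(kdt, buf);

    alglib::real_1d_array x = "[0.1,0.9]";
    alglib::ae_int_t n = alglib::kdtreetsqueryknn(kdt, buf, x, 1, true);

    // Tags are returned in the same order as the other result arrays
    alglib::integer_1d_array tags;
    alglib::kdtreetsqueryresultstags(kdt, buf, tags);
    printf("found %d neighbor, tag=%d\n", (int)n, (int)tags[0]);
    return 0;
}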
@@ -3420,7 +5505,8 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsdistances(kdtree* kdt, +void kdtreetsqueryresultsdistances(kdtree* kdt, + kdtreerequestbuffer* buf, /* Real */ ae_vector* r, ae_state *_state) { @@ -3428,15 +5514,15 @@ ae_int_t k; - if( kdt->kcur==0 ) + if( buf->kcur==0 ) { return; } - if( r->cntkcur ) + if( r->cntkcur ) { - ae_vector_set_length(r, kdt->kcur, _state); + ae_vector_set_length(r, buf->kcur, _state); } - k = kdt->kcur; + k = buf->kcur; /* * unload norms @@ -3448,21 +5534,21 @@ { for(i=0; i<=k-1; i++) { - r->ptr.p_double[i] = ae_fabs(kdt->r.ptr.p_double[i], _state); + r->ptr.p_double[i] = ae_fabs(buf->r.ptr.p_double[i], _state); } } if( kdt->normtype==1 ) { for(i=0; i<=k-1; i++) { - r->ptr.p_double[i] = ae_fabs(kdt->r.ptr.p_double[i], _state); + r->ptr.p_double[i] = ae_fabs(buf->r.ptr.p_double[i], _state); } } if( kdt->normtype==2 ) { for(i=0; i<=k-1; i++) { - r->ptr.p_double[i] = ae_sqrt(ae_fabs(kdt->r.ptr.p_double[i], _state), _state); + r->ptr.p_double[i] = ae_sqrt(ae_fabs(buf->r.ptr.p_double[i], _state), _state); } } } @@ -3561,22 +5647,201 @@ /************************************************************************* -Serializer: allocation +It is informational function which returns bounding box for entire dataset. +This function is not visible to ALGLIB users, only ALGLIB itself may use +it. + +This function assumes that output buffers are preallocated by caller. -- ALGLIB -- - Copyright 14.03.2011 by Bochkanov Sergey + Copyright 20.06.2016 by Bochkanov Sergey *************************************************************************/ -void kdtreealloc(ae_serializer* s, kdtree* tree, ae_state *_state) +void kdtreeexplorebox(kdtree* kdt, + /* Real */ ae_vector* boxmin, + /* Real */ ae_vector* boxmax, + ae_state *_state) { + ae_int_t i; - - /* - * Header - */ - ae_serializer_alloc_entry(s); - ae_serializer_alloc_entry(s); - + rvectorsetlengthatleast(boxmin, kdt->nx, _state); + rvectorsetlengthatleast(boxmax, kdt->nx, _state); + for(i=0; i<=kdt->nx-1; i++) + { + boxmin->ptr.p_double[i] = kdt->boxmin.ptr.p_double[i]; + boxmax->ptr.p_double[i] = kdt->boxmax.ptr.p_double[i]; + } +} + + +/************************************************************************* +It is informational function which allows to get information about node +type. Node index is given by integer value, with 0 corresponding to root +node and other node indexes obtained via exploration. + +You should not expect that serialization/unserialization will retain node +indexes. You should keep in mind that future versions of ALGLIB may +introduce new node types. 
+ +OUTPUT VALUES: + NodeType - node type: + * 0 corresponds to leaf node, which can be explored by + kdtreeexploreleaf() function + * 1 corresponds to split node, which can be explored + by kdtreeexploresplit() function + + -- ALGLIB -- + Copyright 20.06.2016 by Bochkanov Sergey +*************************************************************************/ +void kdtreeexplorenodetype(kdtree* kdt, + ae_int_t node, + ae_int_t* nodetype, + ae_state *_state) +{ + + *nodetype = 0; + + ae_assert(node>=0, "KDTreeExploreNodeType: incorrect node", _state); + ae_assert(nodenodes.cnt, "KDTreeExploreNodeType: incorrect node", _state); + if( kdt->nodes.ptr.p_int[node]>0 ) + { + + /* + * Leaf node + */ + *nodetype = 0; + return; + } + if( kdt->nodes.ptr.p_int[node]==0 ) + { + + /* + * Split node + */ + *nodetype = 1; + return; + } + ae_assert(ae_false, "KDTreeExploreNodeType: integrity check failure", _state); +} + + +/************************************************************************* +It is informational function which allows to get information about leaf +node. Node index is given by integer value, with 0 corresponding to root +node and other node indexes obtained via exploration. + +You should not expect that serialization/unserialization will retain node +indexes. You should keep in mind that future versions of ALGLIB may +introduce new node types. + +OUTPUT VALUES: + XT - output buffer is reallocated (if too small) and filled by + XY values + K - number of rows in XY + + -- ALGLIB -- + Copyright 20.06.2016 by Bochkanov Sergey +*************************************************************************/ +void kdtreeexploreleaf(kdtree* kdt, + ae_int_t node, + /* Real */ ae_matrix* xy, + ae_int_t* k, + ae_state *_state) +{ + ae_int_t offs; + ae_int_t i; + ae_int_t j; + + *k = 0; + + ae_assert(node>=0, "KDTreeExploreLeaf: incorrect node index", _state); + ae_assert(node+1nodes.cnt, "KDTreeExploreLeaf: incorrect node index", _state); + ae_assert(kdt->nodes.ptr.p_int[node]>0, "KDTreeExploreLeaf: incorrect node index", _state); + *k = kdt->nodes.ptr.p_int[node]; + offs = kdt->nodes.ptr.p_int[node+1]; + ae_assert(offs>=0, "KDTreeExploreLeaf: integrity error", _state); + ae_assert(offs+(*k)-1xy.rows, "KDTreeExploreLeaf: integrity error", _state); + rmatrixsetlengthatleast(xy, *k, kdt->nx+kdt->ny, _state); + for(i=0; i<=*k-1; i++) + { + for(j=0; j<=kdt->nx+kdt->ny-1; j++) + { + xy->ptr.pp_double[i][j] = kdt->xy.ptr.pp_double[offs+i][kdt->nx+j]; + } + } +} + + +/************************************************************************* +It is informational function which allows to get information about split +node. Node index is given by integer value, with 0 corresponding to root +node and other node indexes obtained via exploration. + +You should not expect that serialization/unserialization will retain node +indexes. You should keep in mind that future versions of ALGLIB may +introduce new node types. 
+ +OUTPUT VALUES: + XT - output buffer is reallocated (if too small) and filled by + XY values + K - number of rows in XY + + // Nodes[idx+1]=dim dimension to split + // Nodes[idx+2]=offs offset of splitting point in Splits[] + // Nodes[idx+3]=left position of left child in Nodes[] + // Nodes[idx+4]=right position of right child in Nodes[] + + -- ALGLIB -- + Copyright 20.06.2016 by Bochkanov Sergey +*************************************************************************/ +void kdtreeexploresplit(kdtree* kdt, + ae_int_t node, + ae_int_t* d, + double* s, + ae_int_t* nodele, + ae_int_t* nodege, + ae_state *_state) +{ + + *d = 0; + *s = 0; + *nodele = 0; + *nodege = 0; + + ae_assert(node>=0, "KDTreeExploreSplit: incorrect node index", _state); + ae_assert(node+4nodes.cnt, "KDTreeExploreSplit: incorrect node index", _state); + ae_assert(kdt->nodes.ptr.p_int[node]==0, "KDTreeExploreSplit: incorrect node index", _state); + *d = kdt->nodes.ptr.p_int[node+1]; + *s = kdt->splits.ptr.p_double[kdt->nodes.ptr.p_int[node+2]]; + *nodele = kdt->nodes.ptr.p_int[node+3]; + *nodege = kdt->nodes.ptr.p_int[node+4]; + ae_assert(*d>=0, "KDTreeExploreSplit: integrity failure", _state); + ae_assert(*dnx, "KDTreeExploreSplit: integrity failure", _state); + ae_assert(ae_isfinite(*s, _state), "KDTreeExploreSplit: integrity failure", _state); + ae_assert(*nodele>=0, "KDTreeExploreSplit: integrity failure", _state); + ae_assert(*nodelenodes.cnt, "KDTreeExploreSplit: integrity failure", _state); + ae_assert(*nodege>=0, "KDTreeExploreSplit: integrity failure", _state); + ae_assert(*nodegenodes.cnt, "KDTreeExploreSplit: integrity failure", _state); +} + + +/************************************************************************* +Serializer: allocation + + -- ALGLIB -- + Copyright 14.03.2011 by Bochkanov Sergey +*************************************************************************/ +void kdtreealloc(ae_serializer* s, kdtree* tree, ae_state *_state) +{ + + + + /* + * Header + */ + ae_serializer_alloc_entry(s); + ae_serializer_alloc_entry(s); + /* * Data */ @@ -3661,7 +5926,128 @@ unserializerealarray(s, &tree->boxmax, _state); unserializeintegerarray(s, &tree->nodes, _state); unserializerealarray(s, &tree->splits, _state); - nearestneighbor_kdtreealloctemporaries(tree, tree->n, tree->nx, tree->ny, _state); + kdtreecreaterequestbuffer(tree, &tree->innerbuf, _state); +} + + +/************************************************************************* +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, sorted by distance between point and X (by ascending) + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +NOTE: it is also possible to perform undordered queries performed by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. Such queries + are faster because we do not have to use heap structure for sorting. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. 
+ R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. + + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +static ae_int_t nearestneighbor_tsqueryrnn(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* x, + double r, + ae_bool selfmatch, + ae_bool orderedbydist, + ae_state *_state) +{ + ae_int_t i; + ae_int_t j; + ae_int_t result; + + + + /* + * Handle special case: KDT.N=0 + */ + if( kdt->n==0 ) + { + buf->kcur = 0; + result = 0; + return result; + } + + /* + * Check consistency of request buffer + */ + nearestneighbor_checkrequestbufferconsistency(kdt, buf, _state); + + /* + * Prepare parameters + */ + buf->kneeded = 0; + if( kdt->normtype!=2 ) + { + buf->rneeded = r; + } + else + { + buf->rneeded = ae_sqr(r, _state); + } + buf->selfmatch = selfmatch; + buf->approxf = (double)(1); + buf->kcur = 0; + + /* + * calculate distance from point to current bounding box + */ + nearestneighbor_kdtreeinitbox(kdt, x, buf, _state); + + /* + * call recursive search + * results are returned as heap + */ + nearestneighbor_kdtreequerynnrec(kdt, buf, 0, _state); + result = buf->kcur; + + /* + * pop from heap to generate ordered representation + * + * last element is not pop'ed because it is already in + * its place + */ + if( orderedbydist ) + { + j = buf->kcur; + for(i=buf->kcur; i>=2; i--) + { + tagheappopi(&buf->r, &buf->idx, &j, _state); + } + } + return result; } @@ -3703,7 +6089,7 @@ iright = i2-1; while(ileftxy.ptr.pp_double[ileft][d],s) ) + if( kdt->xy.ptr.pp_double[ileft][d]<=s ) { /* @@ -3731,7 +6117,7 @@ iright = iright-1; } } - if( ae_fp_less_eq(kdt->xy.ptr.pp_double[ileft][d],s) ) + if( kdt->xy.ptr.pp_double[ileft][d]<=s ) { ileft = ileft+1; } @@ -3812,11 +6198,11 @@ * In case bounding box has zero size, we enforce creation of the leaf node. */ d = 0; - ds = kdt->curboxmax.ptr.p_double[0]-kdt->curboxmin.ptr.p_double[0]; + ds = kdt->innerbuf.curboxmax.ptr.p_double[0]-kdt->innerbuf.curboxmin.ptr.p_double[0]; for(i=1; i<=nx-1; i++) { - v = kdt->curboxmax.ptr.p_double[i]-kdt->curboxmin.ptr.p_double[i]; - if( ae_fp_greater(v,ds) ) + v = kdt->innerbuf.curboxmax.ptr.p_double[i]-kdt->innerbuf.curboxmin.ptr.p_double[i]; + if( v>ds ) { ds = v; d = i; @@ -3838,38 +6224,38 @@ * (MinV=MaxV) we enforce D-th dimension of bounding * box to become exactly zero and repeat tree construction. 
*/ - s = kdt->curboxmin.ptr.p_double[d]+0.5*ds; - ae_v_move(&kdt->buf.ptr.p_double[0], 1, &kdt->xy.ptr.pp_double[i1][d], kdt->xy.stride, ae_v_len(0,i2-i1-1)); + s = kdt->innerbuf.curboxmin.ptr.p_double[d]+0.5*ds; + ae_v_move(&kdt->innerbuf.buf.ptr.p_double[0], 1, &kdt->xy.ptr.pp_double[i1][d], kdt->xy.stride, ae_v_len(0,i2-i1-1)); n = i2-i1; cntless = 0; cntgreater = 0; - minv = kdt->buf.ptr.p_double[0]; - maxv = kdt->buf.ptr.p_double[0]; + minv = kdt->innerbuf.buf.ptr.p_double[0]; + maxv = kdt->innerbuf.buf.ptr.p_double[0]; minidx = i1; maxidx = i1; for(i=0; i<=n-1; i++) { - v = kdt->buf.ptr.p_double[i]; - if( ae_fp_less(v,minv) ) + v = kdt->innerbuf.buf.ptr.p_double[i]; + if( vmaxv ) { maxv = v; maxidx = i1+i; } - if( ae_fp_less(v,s) ) + if( vs ) { cntgreater = cntgreater+1; } } - if( ae_fp_eq(minv,maxv) ) + if( minv==maxv ) { /* @@ -3877,13 +6263,13 @@ * (MinV=MaxV) we enforce D-th dimension of bounding * box to become exactly zero and repeat tree construction. */ - v0 = kdt->curboxmin.ptr.p_double[d]; - v1 = kdt->curboxmax.ptr.p_double[d]; - kdt->curboxmin.ptr.p_double[d] = minv; - kdt->curboxmax.ptr.p_double[d] = maxv; + v0 = kdt->innerbuf.curboxmin.ptr.p_double[d]; + v1 = kdt->innerbuf.curboxmax.ptr.p_double[d]; + kdt->innerbuf.curboxmin.ptr.p_double[d] = minv; + kdt->innerbuf.curboxmax.ptr.p_double[d] = maxv; nearestneighbor_kdtreegeneratetreerec(kdt, nodesoffs, splitsoffs, i1, i2, maxleafsize, _state); - kdt->curboxmin.ptr.p_double[d] = v0; - kdt->curboxmax.ptr.p_double[d] = v1; + kdt->innerbuf.curboxmin.ptr.p_double[d] = v0; + kdt->innerbuf.curboxmax.ptr.p_double[d] = v1; return; } if( cntless>0&&cntgreater>0 ) @@ -3960,21 +6346,28 @@ *splitsoffs = *splitsoffs+1; /* - * Recirsive generation: + * Recursive generation: * * update CurBox * * call subroutine * * restore CurBox */ kdt->nodes.ptr.p_int[oldoffs+3] = *nodesoffs; - v = kdt->curboxmax.ptr.p_double[d]; - kdt->curboxmax.ptr.p_double[d] = s; + v = kdt->innerbuf.curboxmax.ptr.p_double[d]; + kdt->innerbuf.curboxmax.ptr.p_double[d] = s; nearestneighbor_kdtreegeneratetreerec(kdt, nodesoffs, splitsoffs, i1, i3, maxleafsize, _state); - kdt->curboxmax.ptr.p_double[d] = v; + kdt->innerbuf.curboxmax.ptr.p_double[d] = v; kdt->nodes.ptr.p_int[oldoffs+4] = *nodesoffs; - v = kdt->curboxmin.ptr.p_double[d]; - kdt->curboxmin.ptr.p_double[d] = s; + v = kdt->innerbuf.curboxmin.ptr.p_double[d]; + kdt->innerbuf.curboxmin.ptr.p_double[d] = s; nearestneighbor_kdtreegeneratetreerec(kdt, nodesoffs, splitsoffs, i3, i2, maxleafsize, _state); - kdt->curboxmin.ptr.p_double[d] = v; + kdt->innerbuf.curboxmin.ptr.p_double[d] = v; + + /* + * Zero-fill unused portions of the node (avoid false warnings by Valgrind + * about attempt to serialize uninitialized values) + */ + ae_assert(nearestneighbor_splitnodesize==6, "KDTreeGenerateTreeRec: node size has unexpectedly changed", _state); + kdt->nodes.ptr.p_int[oldoffs+5] = 0; } @@ -3985,6 +6378,7 @@ Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ static void nearestneighbor_kdtreequerynnrec(kdtree* kdt, + kdtreerequestbuffer* buf, ae_int_t offs, ae_state *_state) { @@ -4029,28 +6423,28 @@ { for(j=0; j<=nx-1; j++) { - ptdist = ae_maxreal(ptdist, ae_fabs(kdt->xy.ptr.pp_double[i][j]-kdt->x.ptr.p_double[j], _state), _state); + ptdist = ae_maxreal(ptdist, ae_fabs(kdt->xy.ptr.pp_double[i][j]-buf->x.ptr.p_double[j], _state), _state); } } if( kdt->normtype==1 ) { for(j=0; j<=nx-1; j++) { - ptdist = 
ptdist+ae_fabs(kdt->xy.ptr.pp_double[i][j]-kdt->x.ptr.p_double[j], _state); + ptdist = ptdist+ae_fabs(kdt->xy.ptr.pp_double[i][j]-buf->x.ptr.p_double[j], _state); } } if( kdt->normtype==2 ) { for(j=0; j<=nx-1; j++) { - ptdist = ptdist+ae_sqr(kdt->xy.ptr.pp_double[i][j]-kdt->x.ptr.p_double[j], _state); + ptdist = ptdist+ae_sqr(kdt->xy.ptr.pp_double[i][j]-buf->x.ptr.p_double[j], _state); } } /* * Skip points with zero distance if self-matches are turned off */ - if( ae_fp_eq(ptdist,(double)(0))&&!kdt->selfmatch ) + if( ptdist==0&&!buf->selfmatch ) { continue; } @@ -4059,7 +6453,7 @@ * We CAN'T process point if R-criterion isn't satisfied, * i.e. (RNeeded<>0) AND (PtDist>R). */ - if( ae_fp_eq(kdt->rneeded,(double)(0))||ae_fp_less_eq(ptdist,kdt->rneeded) ) + if( buf->rneeded==0||ptdist<=buf->rneeded ) { /* @@ -4068,13 +6462,13 @@ * (or skip, if worst point is better) * * add point without replacement otherwise */ - if( kdt->kcurkneeded||kdt->kneeded==0 ) + if( buf->kcurkneeded||buf->kneeded==0 ) { /* * add current point to heap without replacement */ - tagheappushi(&kdt->r, &kdt->idx, &kdt->kcur, ptdist, i, _state); + tagheappushi(&buf->r, &buf->idx, &buf->kcur, ptdist, i, _state); } else { @@ -4083,16 +6477,16 @@ * New points are added or not, depending on their distance. * If added, they replace element at the top of the heap */ - if( ae_fp_less(ptdist,kdt->r.ptr.p_double[0]) ) + if( ptdistr.ptr.p_double[0] ) { - if( kdt->kneeded==1 ) + if( buf->kneeded==1 ) { - kdt->idx.ptr.p_int[0] = i; - kdt->r.ptr.p_double[0] = ptdist; + buf->idx.ptr.p_int[0] = i; + buf->r.ptr.p_double[0] = ptdist; } else { - tagheapreplacetopi(&kdt->r, &kdt->idx, kdt->kneeded, ptdist, i, _state); + tagheapreplacetopi(&buf->r, &buf->idx, buf->kneeded, ptdist, i, _state); } } } @@ -4120,7 +6514,7 @@ * * ChildBestOffs child box with best chances * * ChildWorstOffs child box with worst chances */ - if( ae_fp_less_eq(kdt->x.ptr.p_double[d],s) ) + if( buf->x.ptr.p_double[d]<=s ) { childbestoffs = kdt->nodes.ptr.p_int[offs+3]; childworstoffs = kdt->nodes.ptr.p_int[offs+4]; @@ -4161,59 +6555,59 @@ */ if( updatemin ) { - prevdist = kdt->curdist; - t1 = kdt->x.ptr.p_double[d]; - v = kdt->curboxmin.ptr.p_double[d]; - if( ae_fp_less_eq(t1,s) ) + prevdist = buf->curdist; + t1 = buf->x.ptr.p_double[d]; + v = buf->curboxmin.ptr.p_double[d]; + if( t1<=s ) { if( kdt->normtype==0 ) { - kdt->curdist = ae_maxreal(kdt->curdist, s-t1, _state); + buf->curdist = ae_maxreal(buf->curdist, s-t1, _state); } if( kdt->normtype==1 ) { - kdt->curdist = kdt->curdist-ae_maxreal(v-t1, (double)(0), _state)+s-t1; + buf->curdist = buf->curdist-ae_maxreal(v-t1, (double)(0), _state)+s-t1; } if( kdt->normtype==2 ) { - kdt->curdist = kdt->curdist-ae_sqr(ae_maxreal(v-t1, (double)(0), _state), _state)+ae_sqr(s-t1, _state); + buf->curdist = buf->curdist-ae_sqr(ae_maxreal(v-t1, (double)(0), _state), _state)+ae_sqr(s-t1, _state); } } - kdt->curboxmin.ptr.p_double[d] = s; + buf->curboxmin.ptr.p_double[d] = s; } else { - prevdist = kdt->curdist; - t1 = kdt->x.ptr.p_double[d]; - v = kdt->curboxmax.ptr.p_double[d]; - if( ae_fp_greater_eq(t1,s) ) + prevdist = buf->curdist; + t1 = buf->x.ptr.p_double[d]; + v = buf->curboxmax.ptr.p_double[d]; + if( t1>=s ) { if( kdt->normtype==0 ) { - kdt->curdist = ae_maxreal(kdt->curdist, t1-s, _state); + buf->curdist = ae_maxreal(buf->curdist, t1-s, _state); } if( kdt->normtype==1 ) { - kdt->curdist = kdt->curdist-ae_maxreal(t1-v, (double)(0), _state)+t1-s; + buf->curdist = buf->curdist-ae_maxreal(t1-v, (double)(0), _state)+t1-s; } if( 
kdt->normtype==2 ) { - kdt->curdist = kdt->curdist-ae_sqr(ae_maxreal(t1-v, (double)(0), _state), _state)+ae_sqr(t1-s, _state); + buf->curdist = buf->curdist-ae_sqr(ae_maxreal(t1-v, (double)(0), _state), _state)+ae_sqr(t1-s, _state); } } - kdt->curboxmax.ptr.p_double[d] = s; + buf->curboxmax.ptr.p_double[d] = s; } /* * Decide: to dive into cell or not to dive */ - if( ae_fp_neq(kdt->rneeded,(double)(0))&&ae_fp_greater(kdt->curdist,kdt->rneeded) ) + if( buf->rneeded!=0&&buf->curdist>buf->rneeded ) { todive = ae_false; } else { - if( kdt->kcurkneeded||kdt->kneeded==0 ) + if( buf->kcurkneeded||buf->kneeded==0 ) { /* @@ -4228,298 +6622,921 @@ * KCur=KNeeded, decide to dive or not to dive * using point position relative to bounding box. */ - todive = ae_fp_less_eq(kdt->curdist,kdt->r.ptr.p_double[0]*kdt->approxf); + todive = buf->curdist<=buf->r.ptr.p_double[0]*buf->approxf; } } if( todive ) { - nearestneighbor_kdtreequerynnrec(kdt, childoffs, _state); + nearestneighbor_kdtreequerynnrec(kdt, buf, childoffs, _state); } /* * Restore bounding box and distance */ - if( updatemin ) - { - kdt->curboxmin.ptr.p_double[d] = v; - } - else - { - kdt->curboxmax.ptr.p_double[d] = v; - } - kdt->curdist = prevdist; + if( updatemin ) + { + buf->curboxmin.ptr.p_double[d] = v; + } + else + { + buf->curboxmax.ptr.p_double[d] = v; + } + buf->curdist = prevdist; + } + return; + } +} + + +/************************************************************************* +Recursive subroutine for box queries. + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +static void nearestneighbor_kdtreequeryboxrec(kdtree* kdt, + kdtreerequestbuffer* buf, + ae_int_t offs, + ae_state *_state) +{ + ae_bool inbox; + ae_int_t nx; + ae_int_t i1; + ae_int_t i2; + ae_int_t i; + ae_int_t j; + ae_int_t d; + double s; + double v; + + + ae_assert(kdt->n>0, "KDTreeQueryBoxRec: internal error", _state); + nx = kdt->nx; + + /* + * Check that intersection of query box with bounding box is non-empty. + * This check is performed once for Offs=0 (tree root). + */ + if( offs==0 ) + { + for(j=0; j<=nx-1; j++) + { + if( buf->boxmin.ptr.p_double[j]>buf->curboxmax.ptr.p_double[j] ) + { + return; + } + if( buf->boxmax.ptr.p_double[j]curboxmin.ptr.p_double[j] ) + { + return; + } + } + } + + /* + * Leaf node. + * Process points. 
+ */ + if( kdt->nodes.ptr.p_int[offs]>0 ) + { + i1 = kdt->nodes.ptr.p_int[offs+1]; + i2 = i1+kdt->nodes.ptr.p_int[offs]; + for(i=i1; i<=i2-1; i++) + { + + /* + * Check whether point is in box or not + */ + inbox = ae_true; + for(j=0; j<=nx-1; j++) + { + inbox = inbox&&kdt->xy.ptr.pp_double[i][j]>=buf->boxmin.ptr.p_double[j]; + inbox = inbox&&kdt->xy.ptr.pp_double[i][j]<=buf->boxmax.ptr.p_double[j]; + } + if( !inbox ) + { + continue; + } + + /* + * Add point to unordered list + */ + buf->r.ptr.p_double[buf->kcur] = 0.0; + buf->idx.ptr.p_int[buf->kcur] = i; + buf->kcur = buf->kcur+1; + } + return; + } + + /* + * Simple split + */ + if( kdt->nodes.ptr.p_int[offs]==0 ) + { + + /* + * Load: + * * D dimension to split + * * S split position + */ + d = kdt->nodes.ptr.p_int[offs+1]; + s = kdt->splits.ptr.p_double[kdt->nodes.ptr.p_int[offs+2]]; + + /* + * Check lower split (S is upper bound of new bounding box) + */ + if( s>=buf->boxmin.ptr.p_double[d] ) + { + v = buf->curboxmax.ptr.p_double[d]; + buf->curboxmax.ptr.p_double[d] = s; + nearestneighbor_kdtreequeryboxrec(kdt, buf, kdt->nodes.ptr.p_int[offs+3], _state); + buf->curboxmax.ptr.p_double[d] = v; + } + + /* + * Check upper split (S is lower bound of new bounding box) + */ + if( s<=buf->boxmax.ptr.p_double[d] ) + { + v = buf->curboxmin.ptr.p_double[d]; + buf->curboxmin.ptr.p_double[d] = s; + nearestneighbor_kdtreequeryboxrec(kdt, buf, kdt->nodes.ptr.p_int[offs+4], _state); + buf->curboxmin.ptr.p_double[d] = v; + } + return; + } +} + + +/************************************************************************* +Copies X[] to Buf.X[] +Loads distance from X[] to bounding box. +Initializes Buf.CurBox[]. + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +static void nearestneighbor_kdtreeinitbox(kdtree* kdt, + /* Real */ ae_vector* x, + kdtreerequestbuffer* buf, + ae_state *_state) +{ + ae_int_t i; + double vx; + double vmin; + double vmax; + + + ae_assert(kdt->n>0, "KDTreeInitBox: internal error", _state); + + /* + * calculate distance from point to current bounding box + */ + buf->curdist = (double)(0); + if( kdt->normtype==0 ) + { + for(i=0; i<=kdt->nx-1; i++) + { + vx = x->ptr.p_double[i]; + vmin = kdt->boxmin.ptr.p_double[i]; + vmax = kdt->boxmax.ptr.p_double[i]; + buf->x.ptr.p_double[i] = vx; + buf->curboxmin.ptr.p_double[i] = vmin; + buf->curboxmax.ptr.p_double[i] = vmax; + if( vxcurdist = ae_maxreal(buf->curdist, vmin-vx, _state); + } + else + { + if( vx>vmax ) + { + buf->curdist = ae_maxreal(buf->curdist, vx-vmax, _state); + } + } + } + } + if( kdt->normtype==1 ) + { + for(i=0; i<=kdt->nx-1; i++) + { + vx = x->ptr.p_double[i]; + vmin = kdt->boxmin.ptr.p_double[i]; + vmax = kdt->boxmax.ptr.p_double[i]; + buf->x.ptr.p_double[i] = vx; + buf->curboxmin.ptr.p_double[i] = vmin; + buf->curboxmax.ptr.p_double[i] = vmax; + if( vxcurdist = buf->curdist+vmin-vx; + } + else + { + if( vx>vmax ) + { + buf->curdist = buf->curdist+vx-vmax; + } + } + } + } + if( kdt->normtype==2 ) + { + for(i=0; i<=kdt->nx-1; i++) + { + vx = x->ptr.p_double[i]; + vmin = kdt->boxmin.ptr.p_double[i]; + vmax = kdt->boxmax.ptr.p_double[i]; + buf->x.ptr.p_double[i] = vx; + buf->curboxmin.ptr.p_double[i] = vmin; + buf->curboxmax.ptr.p_double[i] = vmax; + if( vxcurdist = buf->curdist+ae_sqr(vmin-vx, _state); + } + else + { + if( vx>vmax ) + { + buf->curdist = buf->curdist+ae_sqr(vx-vmax, _state); + } + } + } + } +} + + +/************************************************************************* 
+This function allocates all dataset-independend array fields of KDTree, +i.e. such array fields that their dimensions do not depend on dataset +size. + +This function do not sets KDT.NX or KDT.NY - it just allocates arrays + + -- ALGLIB -- + Copyright 14.03.2011 by Bochkanov Sergey +*************************************************************************/ +static void nearestneighbor_kdtreeallocdatasetindependent(kdtree* kdt, + ae_int_t nx, + ae_int_t ny, + ae_state *_state) +{ + + + ae_assert(kdt->n>0, "KDTreeAllocDatasetIndependent: internal error", _state); + ae_vector_set_length(&kdt->boxmin, nx, _state); + ae_vector_set_length(&kdt->boxmax, nx, _state); +} + + +/************************************************************************* +This function allocates all dataset-dependent array fields of KDTree, i.e. +such array fields that their dimensions depend on dataset size. + +This function do not sets KDT.N, KDT.NX or KDT.NY - +it just allocates arrays. + + -- ALGLIB -- + Copyright 14.03.2011 by Bochkanov Sergey +*************************************************************************/ +static void nearestneighbor_kdtreeallocdatasetdependent(kdtree* kdt, + ae_int_t n, + ae_int_t nx, + ae_int_t ny, + ae_state *_state) +{ + + + ae_assert(n>0, "KDTreeAllocDatasetDependent: internal error", _state); + ae_matrix_set_length(&kdt->xy, n, 2*nx+ny, _state); + ae_vector_set_length(&kdt->tags, n, _state); + ae_vector_set_length(&kdt->nodes, nearestneighbor_splitnodesize*2*n, _state); + ae_vector_set_length(&kdt->splits, 2*n, _state); +} + + +/************************************************************************* +This function checks consistency of request buffer structure with +dimensions of kd-tree object. + + -- ALGLIB -- + Copyright 02.04.2016 by Bochkanov Sergey +*************************************************************************/ +static void nearestneighbor_checkrequestbufferconsistency(kdtree* kdt, + kdtreerequestbuffer* buf, + ae_state *_state) +{ + + + ae_assert(buf->x.cnt>=kdt->nx, "KDTree: dimensions of kdtreerequestbuffer are inconsistent with kdtree structure", _state); + ae_assert(buf->idx.cnt>=kdt->n, "KDTree: dimensions of kdtreerequestbuffer are inconsistent with kdtree structure", _state); + ae_assert(buf->r.cnt>=kdt->n, "KDTree: dimensions of kdtreerequestbuffer are inconsistent with kdtree structure", _state); + ae_assert(buf->buf.cnt>=ae_maxint(kdt->n, kdt->nx, _state), "KDTree: dimensions of kdtreerequestbuffer are inconsistent with kdtree structure", _state); + ae_assert(buf->curboxmin.cnt>=kdt->nx, "KDTree: dimensions of kdtreerequestbuffer are inconsistent with kdtree structure", _state); + ae_assert(buf->curboxmax.cnt>=kdt->nx, "KDTree: dimensions of kdtreerequestbuffer are inconsistent with kdtree structure", _state); +} + + +void _kdtreerequestbuffer_init(void* _p, ae_state *_state, ae_bool make_automatic) +{ + kdtreerequestbuffer *p = (kdtreerequestbuffer*)_p; + ae_touch_ptr((void*)p); + ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->boxmin, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->boxmax, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->idx, 0, DT_INT, _state, make_automatic); + ae_vector_init(&p->r, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->buf, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->curboxmin, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->curboxmax, 0, DT_REAL, _state, make_automatic); +} + + +void _kdtreerequestbuffer_init_copy(void* _dst, void* 
_src, ae_state *_state, ae_bool make_automatic) +{ + kdtreerequestbuffer *dst = (kdtreerequestbuffer*)_dst; + kdtreerequestbuffer *src = (kdtreerequestbuffer*)_src; + ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic); + ae_vector_init_copy(&dst->boxmin, &src->boxmin, _state, make_automatic); + ae_vector_init_copy(&dst->boxmax, &src->boxmax, _state, make_automatic); + dst->kneeded = src->kneeded; + dst->rneeded = src->rneeded; + dst->selfmatch = src->selfmatch; + dst->approxf = src->approxf; + dst->kcur = src->kcur; + ae_vector_init_copy(&dst->idx, &src->idx, _state, make_automatic); + ae_vector_init_copy(&dst->r, &src->r, _state, make_automatic); + ae_vector_init_copy(&dst->buf, &src->buf, _state, make_automatic); + ae_vector_init_copy(&dst->curboxmin, &src->curboxmin, _state, make_automatic); + ae_vector_init_copy(&dst->curboxmax, &src->curboxmax, _state, make_automatic); + dst->curdist = src->curdist; +} + + +void _kdtreerequestbuffer_clear(void* _p) +{ + kdtreerequestbuffer *p = (kdtreerequestbuffer*)_p; + ae_touch_ptr((void*)p); + ae_vector_clear(&p->x); + ae_vector_clear(&p->boxmin); + ae_vector_clear(&p->boxmax); + ae_vector_clear(&p->idx); + ae_vector_clear(&p->r); + ae_vector_clear(&p->buf); + ae_vector_clear(&p->curboxmin); + ae_vector_clear(&p->curboxmax); +} + + +void _kdtreerequestbuffer_destroy(void* _p) +{ + kdtreerequestbuffer *p = (kdtreerequestbuffer*)_p; + ae_touch_ptr((void*)p); + ae_vector_destroy(&p->x); + ae_vector_destroy(&p->boxmin); + ae_vector_destroy(&p->boxmax); + ae_vector_destroy(&p->idx); + ae_vector_destroy(&p->r); + ae_vector_destroy(&p->buf); + ae_vector_destroy(&p->curboxmin); + ae_vector_destroy(&p->curboxmax); +} + + +void _kdtree_init(void* _p, ae_state *_state, ae_bool make_automatic) +{ + kdtree *p = (kdtree*)_p; + ae_touch_ptr((void*)p); + ae_matrix_init(&p->xy, 0, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->tags, 0, DT_INT, _state, make_automatic); + ae_vector_init(&p->boxmin, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->boxmax, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->nodes, 0, DT_INT, _state, make_automatic); + ae_vector_init(&p->splits, 0, DT_REAL, _state, make_automatic); + _kdtreerequestbuffer_init(&p->innerbuf, _state, make_automatic); +} + + +void _kdtree_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) +{ + kdtree *dst = (kdtree*)_dst; + kdtree *src = (kdtree*)_src; + dst->n = src->n; + dst->nx = src->nx; + dst->ny = src->ny; + dst->normtype = src->normtype; + ae_matrix_init_copy(&dst->xy, &src->xy, _state, make_automatic); + ae_vector_init_copy(&dst->tags, &src->tags, _state, make_automatic); + ae_vector_init_copy(&dst->boxmin, &src->boxmin, _state, make_automatic); + ae_vector_init_copy(&dst->boxmax, &src->boxmax, _state, make_automatic); + ae_vector_init_copy(&dst->nodes, &src->nodes, _state, make_automatic); + ae_vector_init_copy(&dst->splits, &src->splits, _state, make_automatic); + _kdtreerequestbuffer_init_copy(&dst->innerbuf, &src->innerbuf, _state, make_automatic); + dst->debugcounter = src->debugcounter; +} + + +void _kdtree_clear(void* _p) +{ + kdtree *p = (kdtree*)_p; + ae_touch_ptr((void*)p); + ae_matrix_clear(&p->xy); + ae_vector_clear(&p->tags); + ae_vector_clear(&p->boxmin); + ae_vector_clear(&p->boxmax); + ae_vector_clear(&p->nodes); + ae_vector_clear(&p->splits); + _kdtreerequestbuffer_clear(&p->innerbuf); +} + + +void _kdtree_destroy(void* _p) +{ + kdtree *p = (kdtree*)_p; + ae_touch_ptr((void*)p); + ae_matrix_destroy(&p->xy); + 
ae_vector_destroy(&p->tags); + ae_vector_destroy(&p->boxmin); + ae_vector_destroy(&p->boxmax); + ae_vector_destroy(&p->nodes); + ae_vector_destroy(&p->splits); + _kdtreerequestbuffer_destroy(&p->innerbuf); +} + + +#endif +#if defined(AE_COMPILE_HQRND) || !defined(AE_PARTIAL_BUILD) + + +/************************************************************************* +HQRNDState initialization with random values which come from standard +RNG. + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +void hqrndrandomize(hqrndstate* state, ae_state *_state) +{ + ae_int_t s0; + ae_int_t s1; + + _hqrndstate_clear(state); + + s0 = ae_randominteger(hqrnd_hqrndm1, _state); + s1 = ae_randominteger(hqrnd_hqrndm2, _state); + hqrndseed(s0, s1, state, _state); +} + + +/************************************************************************* +HQRNDState initialization with seed values + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +void hqrndseed(ae_int_t s1, + ae_int_t s2, + hqrndstate* state, + ae_state *_state) +{ + + _hqrndstate_clear(state); + + + /* + * Protection against negative seeds: + * + * SEED := -(SEED+1) + * + * We can use just "-SEED" because there exists such integer number N + * that N<0, -N=N<0 too. (This number is equal to 0x800...000). Need + * to handle such seed correctly forces us to use a bit complicated + * formula. + */ + if( s1<0 ) + { + s1 = -(s1+1); + } + if( s2<0 ) + { + s2 = -(s2+1); + } + state->s1 = s1%(hqrnd_hqrndm1-1)+1; + state->s2 = s2%(hqrnd_hqrndm2-1)+1; + state->magicv = hqrnd_hqrndmagic; +} + + +/************************************************************************* +This function generates random real number in (0,1), +not including interval boundaries + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +double hqrnduniformr(hqrndstate* state, ae_state *_state) +{ + double result; + + + result = (double)(hqrnd_hqrndintegerbase(state, _state)+1)/(double)(hqrnd_hqrndmax+2); + return result; +} + + +/************************************************************************* +This function generates random integer number in [0, N) + +1. State structure must be initialized with HQRNDRandomize() or HQRNDSeed() +2. N can be any positive number except for very large numbers: + * close to 2^31 on 32-bit systems + * close to 2^62 on 64-bit systems + An exception will be generated if N is too large. + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +ae_int_t hqrnduniformi(hqrndstate* state, ae_int_t n, ae_state *_state) +{ + ae_int_t maxcnt; + ae_int_t mx; + ae_int_t a; + ae_int_t b; + ae_int_t result; + + + ae_assert(n>0, "HQRNDUniformI: N<=0!", _state); + maxcnt = hqrnd_hqrndmax+1; + + /* + * Two branches: one for N<=MaxCnt, another for N>MaxCnt. + */ + if( n>maxcnt ) + { + + /* + * N>=MaxCnt. + * + * We have two options here: + * a) N is exactly divisible by MaxCnt + * b) N is not divisible by MaxCnt + * + * In both cases we reduce problem on interval spanning [0,N) + * to several subproblems on intervals spanning [0,MaxCnt). + */ + if( n%maxcnt==0 ) + { + + /* + * N is exactly divisible by MaxCnt. 
+ * + * [0,N) range is dividided into N/MaxCnt bins, + * each of them having length equal to MaxCnt. + * + * We generate: + * * random bin number B + * * random offset within bin A + * Both random numbers are generated by recursively + * calling HQRNDUniformI(). + * + * Result is equal to A+MaxCnt*B. + */ + ae_assert(n/maxcnt<=maxcnt, "HQRNDUniformI: N is too large", _state); + a = hqrnduniformi(state, maxcnt, _state); + b = hqrnduniformi(state, n/maxcnt, _state); + result = a+maxcnt*b; + } + else + { + + /* + * N is NOT exactly divisible by MaxCnt. + * + * [0,N) range is dividided into Ceil(N/MaxCnt) bins, + * each of them having length equal to MaxCnt. + * + * We generate: + * * random bin number B in [0, Ceil(N/MaxCnt)-1] + * * random offset within bin A + * * if both of what is below is true + * 1) bin number B is that of the last bin + * 2) A >= N mod MaxCnt + * then we repeat generation of A/B. + * This stage is essential in order to avoid bias in the result. + * * otherwise, we return A*MaxCnt+N + */ + ae_assert(n/maxcnt+1<=maxcnt, "HQRNDUniformI: N is too large", _state); + result = -1; + do + { + a = hqrnduniformi(state, maxcnt, _state); + b = hqrnduniformi(state, n/maxcnt+1, _state); + if( b==n/maxcnt&&a>=n%maxcnt ) + { + continue; + } + result = a+maxcnt*b; + } + while(result<0); + } + } + else + { + + /* + * N<=MaxCnt + * + * Code below is a bit complicated because we can not simply + * return "HQRNDIntegerBase() mod N" - it will be skewed for + * large N's in [0.1*HQRNDMax...HQRNDMax]. + */ + mx = maxcnt-maxcnt%n; + do + { + result = hqrnd_hqrndintegerbase(state, _state); + } + while(result>=mx); + result = result%n; + } + return result; +} + + +/************************************************************************* +Random number generator: normal numbers + +This function generates one random number from normal distribution. +Its performance is equal to that of HQRNDNormal2() + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +double hqrndnormal(hqrndstate* state, ae_state *_state) +{ + double v1; + double v2; + double result; + + + hqrndnormal2(state, &v1, &v2, _state); + result = v1; + return result; +} + + +/************************************************************************* +Random number generator: random X and Y such that X^2+Y^2=1 + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +void hqrndunit2(hqrndstate* state, double* x, double* y, ae_state *_state) +{ + double v; + double mx; + double mn; + + *x = 0; + *y = 0; + + do + { + hqrndnormal2(state, x, y, _state); + } + while(!(ae_fp_neq(*x,(double)(0))||ae_fp_neq(*y,(double)(0)))); + mx = ae_maxreal(ae_fabs(*x, _state), ae_fabs(*y, _state), _state); + mn = ae_minreal(ae_fabs(*x, _state), ae_fabs(*y, _state), _state); + v = mx*ae_sqrt(1+ae_sqr(mn/mx, _state), _state); + *x = *x/v; + *y = *y/v; +} + + +/************************************************************************* +Random number generator: normal numbers + +This function generates two independent random numbers from normal +distribution. Its performance is equal to that of HQRNDNormal() + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). 
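For orientation only, here is a minimal sketch of how the generators implemented above are normally driven through the C++ wrappers declared in alglibmisc.h (hqrndseed, hqrnduniformr, hqrnduniformi, hqrndnormal); the seed values and the bound 100 are illustrative choices, not taken from this patch:

    #include "alglibmisc.h"
    #include <iostream>

    int main()
    {
        alglib::hqrndstate state;

        // Deterministic seeding; hqrndseed() itself folds negative seeds
        // to non-negative values, as the implementation above shows.
        alglib::hqrndseed(11, 7, state);

        double u = alglib::hqrnduniformr(state);                 // uniform in (0,1)
        alglib::ae_int_t k = alglib::hqrnduniformi(state, 100);  // uniform integer in [0,100)
        double z = alglib::hqrndnormal(state);                   // standard normal deviate

        std::cout << u << " " << k << " " << z << "\n";
        return 0;
    }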
+ + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +void hqrndnormal2(hqrndstate* state, + double* x1, + double* x2, + ae_state *_state) +{ + double u; + double v; + double s; + + *x1 = 0; + *x2 = 0; + + for(;;) + { + u = 2*hqrnduniformr(state, _state)-1; + v = 2*hqrnduniformr(state, _state)-1; + s = ae_sqr(u, _state)+ae_sqr(v, _state); + if( ae_fp_greater(s,(double)(0))&&ae_fp_less(s,(double)(1)) ) + { + + /* + * two Sqrt's instead of one to + * avoid overflow when S is too small + */ + s = ae_sqrt(-2*ae_log(s, _state), _state)/ae_sqrt(s, _state); + *x1 = u*s; + *x2 = v*s; + return; } - return; } } /************************************************************************* -Copies X[] to KDT.X[] -Loads distance from X[] to bounding box. -Initializes CurBox[]. +Random number generator: exponential distribution + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 11.08.2007 by Bochkanov Sergey *************************************************************************/ -static void nearestneighbor_kdtreeinitbox(kdtree* kdt, - /* Real */ ae_vector* x, +double hqrndexponential(hqrndstate* state, + double lambdav, ae_state *_state) { - ae_int_t i; - double vx; - double vmin; - double vmax; + double result; - ae_assert(kdt->n>0, "KDTreeInitBox: internal error", _state); - - /* - * calculate distance from point to current bounding box - */ - kdt->curdist = (double)(0); - if( kdt->normtype==0 ) - { - for(i=0; i<=kdt->nx-1; i++) - { - vx = x->ptr.p_double[i]; - vmin = kdt->boxmin.ptr.p_double[i]; - vmax = kdt->boxmax.ptr.p_double[i]; - kdt->x.ptr.p_double[i] = vx; - kdt->curboxmin.ptr.p_double[i] = vmin; - kdt->curboxmax.ptr.p_double[i] = vmax; - if( ae_fp_less(vx,vmin) ) - { - kdt->curdist = ae_maxreal(kdt->curdist, vmin-vx, _state); - } - else - { - if( ae_fp_greater(vx,vmax) ) - { - kdt->curdist = ae_maxreal(kdt->curdist, vx-vmax, _state); - } - } - } - } - if( kdt->normtype==1 ) - { - for(i=0; i<=kdt->nx-1; i++) - { - vx = x->ptr.p_double[i]; - vmin = kdt->boxmin.ptr.p_double[i]; - vmax = kdt->boxmax.ptr.p_double[i]; - kdt->x.ptr.p_double[i] = vx; - kdt->curboxmin.ptr.p_double[i] = vmin; - kdt->curboxmax.ptr.p_double[i] = vmax; - if( ae_fp_less(vx,vmin) ) - { - kdt->curdist = kdt->curdist+vmin-vx; - } - else - { - if( ae_fp_greater(vx,vmax) ) - { - kdt->curdist = kdt->curdist+vx-vmax; - } - } - } - } - if( kdt->normtype==2 ) - { - for(i=0; i<=kdt->nx-1; i++) - { - vx = x->ptr.p_double[i]; - vmin = kdt->boxmin.ptr.p_double[i]; - vmax = kdt->boxmax.ptr.p_double[i]; - kdt->x.ptr.p_double[i] = vx; - kdt->curboxmin.ptr.p_double[i] = vmin; - kdt->curboxmax.ptr.p_double[i] = vmax; - if( ae_fp_less(vx,vmin) ) - { - kdt->curdist = kdt->curdist+ae_sqr(vmin-vx, _state); - } - else - { - if( ae_fp_greater(vx,vmax) ) - { - kdt->curdist = kdt->curdist+ae_sqr(vx-vmax, _state); - } - } - } - } + ae_assert(ae_fp_greater(lambdav,(double)(0)), "HQRNDExponential: LambdaV<=0!", _state); + result = -ae_log(hqrnduniformr(state, _state), _state)/lambdav; + return result; } /************************************************************************* -This function allocates all dataset-independent array fields of KDTree, -i.e. such array fields that their dimensions do not depend on dataset -size. +This function generates random number from discrete distribution given by +finite sample X. 
-This function do not sets KDT.NX or KDT.NY - it just allocates arrays
+INPUT PARAMETERS
+    State   -   high quality random number generator, must be
+                initialized with HQRNDRandomize() or HQRNDSeed().
+    X       -   finite sample
+    N       -   number of elements to use, N>=1
+
+RESULT
+    this function returns one of the X[i] for random i=0..N-1

   -- ALGLIB --
-     Copyright 14.03.2011 by Bochkanov Sergey
+     Copyright 08.11.2011 by Bochkanov Sergey
 *************************************************************************/
-static void nearestneighbor_kdtreeallocdatasetindependent(kdtree* kdt,
-     ae_int_t nx,
-     ae_int_t ny,
+double hqrnddiscrete(hqrndstate* state,
+     /* Real */ ae_vector* x,
+     ae_int_t n,
      ae_state *_state)
 {
+    double result;

-    ae_assert(kdt->n>0, "KDTreeAllocDatasetIndependent: internal error", _state);
-    ae_vector_set_length(&kdt->x, nx, _state);
-    ae_vector_set_length(&kdt->boxmin, nx, _state);
-    ae_vector_set_length(&kdt->boxmax, nx, _state);
-    ae_vector_set_length(&kdt->curboxmin, nx, _state);
-    ae_vector_set_length(&kdt->curboxmax, nx, _state);
+    ae_assert(n>0, "HQRNDDiscrete: N<=0", _state);
+    ae_assert(n<=x->cnt, "HQRNDDiscrete: Length(X)<N", _state);
+    result = x->ptr.p_double[hqrnduniformi(state, n, _state)];
+    return result;
 }


 /*************************************************************************
-This function allocates all dataset-dependent array fields of KDTree, i.e.
-such array fields that their dimensions depend on dataset size.
+This function generates random number from continuous distribution given
+by finite sample X.

-This function do not sets KDT.N, KDT.NX or KDT.NY -
-it just allocates arrays.
+INPUT PARAMETERS
+    State   -   high quality random number generator, must be
+                initialized with HQRNDRandomize() or HQRNDSeed().
+    X       -   finite sample, array[N] (can be larger, in this case only
+                leading N elements are used). THIS ARRAY MUST BE SORTED BY
+                ASCENDING.
+    N       -   number of elements to use, N>=1
+
+RESULT
+    this function returns random number from continuous distribution which
+    tries to approximate X as mush as possible. min(X)<=Result<=max(X).
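A companion sketch for the two sample-based generators, hqrnddiscrete and hqrndcontinuous, again through the C++ wrappers; the sorted sample used below is made up purely for illustration:

    #include "alglibmisc.h"
    #include <iostream>

    int main()
    {
        alglib::hqrndstate state;
        alglib::hqrndrandomize(state);             // nondeterministic seeding

        // Finite sample; hqrndcontinuous() requires it to be sorted ascending.
        alglib::real_1d_array sample = "[0.0, 0.5, 1.0, 4.0]";

        double d = alglib::hqrnddiscrete(state, sample, 4);    // one of the sample values
        double c = alglib::hqrndcontinuous(state, sample, 4);  // value in [min(sample), max(sample)]

        std::cout << d << " " << c << "\n";
        return 0;
    }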
-- ALGLIB -- - Copyright 14.03.2011 by Bochkanov Sergey + Copyright 08.11.2011 by Bochkanov Sergey *************************************************************************/ -static void nearestneighbor_kdtreeallocdatasetdependent(kdtree* kdt, +double hqrndcontinuous(hqrndstate* state, + /* Real */ ae_vector* x, ae_int_t n, - ae_int_t nx, - ae_int_t ny, ae_state *_state) { + double mx; + double mn; + ae_int_t i; + double result; - ae_assert(n>0, "KDTreeAllocDatasetDependent: internal error", _state); - ae_matrix_set_length(&kdt->xy, n, 2*nx+ny, _state); - ae_vector_set_length(&kdt->tags, n, _state); - ae_vector_set_length(&kdt->idx, n, _state); - ae_vector_set_length(&kdt->r, n, _state); - ae_vector_set_length(&kdt->x, nx, _state); - ae_vector_set_length(&kdt->buf, ae_maxint(n, nx, _state), _state); - ae_vector_set_length(&kdt->nodes, nearestneighbor_splitnodesize*2*n, _state); - ae_vector_set_length(&kdt->splits, 2*n, _state); + ae_assert(n>0, "HQRNDContinuous: N<=0", _state); + ae_assert(n<=x->cnt, "HQRNDContinuous: Length(X)ptr.p_double[0]; + return result; + } + i = hqrnduniformi(state, n-1, _state); + mn = x->ptr.p_double[i]; + mx = x->ptr.p_double[i+1]; + ae_assert(ae_fp_greater_eq(mx,mn), "HQRNDDiscrete: X is not sorted by ascending", _state); + if( ae_fp_neq(mx,mn) ) + { + result = (mx-mn)*hqrnduniformr(state, _state)+mn; + } + else + { + result = mn; + } + return result; } /************************************************************************* -This function allocates temporaries. - -This function do not sets KDT.N, KDT.NX or KDT.NY - -it just allocates arrays. +This function returns random integer in [0,HQRNDMax] - -- ALGLIB -- - Copyright 14.03.2011 by Bochkanov Sergey +L'Ecuyer, Efficient and portable combined random number generators *************************************************************************/ -static void nearestneighbor_kdtreealloctemporaries(kdtree* kdt, - ae_int_t n, - ae_int_t nx, - ae_int_t ny, +static ae_int_t hqrnd_hqrndintegerbase(hqrndstate* state, ae_state *_state) { + ae_int_t k; + ae_int_t result; - ae_assert(n>0, "KDTreeAllocTemporaries: internal error", _state); - ae_vector_set_length(&kdt->x, nx, _state); - ae_vector_set_length(&kdt->idx, n, _state); - ae_vector_set_length(&kdt->r, n, _state); - ae_vector_set_length(&kdt->buf, ae_maxint(n, nx, _state), _state); - ae_vector_set_length(&kdt->curboxmin, nx, _state); - ae_vector_set_length(&kdt->curboxmax, nx, _state); + ae_assert(state->magicv==hqrnd_hqrndmagic, "HQRNDIntegerBase: State is not correctly initialized!", _state); + k = state->s1/53668; + state->s1 = 40014*(state->s1-k*53668)-k*12211; + if( state->s1<0 ) + { + state->s1 = state->s1+2147483563; + } + k = state->s2/52774; + state->s2 = 40692*(state->s2-k*52774)-k*3791; + if( state->s2<0 ) + { + state->s2 = state->s2+2147483399; + } + + /* + * Result + */ + result = state->s1-state->s2; + if( result<1 ) + { + result = result+2147483562; + } + result = result-1; + return result; } -void _kdtree_init(void* _p, ae_state *_state) +void _hqrndstate_init(void* _p, ae_state *_state, ae_bool make_automatic) { - kdtree *p = (kdtree*)_p; + hqrndstate *p = (hqrndstate*)_p; ae_touch_ptr((void*)p); - ae_matrix_init(&p->xy, 0, 0, DT_REAL, _state); - ae_vector_init(&p->tags, 0, DT_INT, _state); - ae_vector_init(&p->boxmin, 0, DT_REAL, _state); - ae_vector_init(&p->boxmax, 0, DT_REAL, _state); - ae_vector_init(&p->nodes, 0, DT_INT, _state); - ae_vector_init(&p->splits, 0, DT_REAL, _state); - ae_vector_init(&p->x, 0, DT_REAL, _state); - 
ae_vector_init(&p->idx, 0, DT_INT, _state); - ae_vector_init(&p->r, 0, DT_REAL, _state); - ae_vector_init(&p->buf, 0, DT_REAL, _state); - ae_vector_init(&p->curboxmin, 0, DT_REAL, _state); - ae_vector_init(&p->curboxmax, 0, DT_REAL, _state); } -void _kdtree_init_copy(void* _dst, void* _src, ae_state *_state) +void _hqrndstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { - kdtree *dst = (kdtree*)_dst; - kdtree *src = (kdtree*)_src; - dst->n = src->n; - dst->nx = src->nx; - dst->ny = src->ny; - dst->normtype = src->normtype; - ae_matrix_init_copy(&dst->xy, &src->xy, _state); - ae_vector_init_copy(&dst->tags, &src->tags, _state); - ae_vector_init_copy(&dst->boxmin, &src->boxmin, _state); - ae_vector_init_copy(&dst->boxmax, &src->boxmax, _state); - ae_vector_init_copy(&dst->nodes, &src->nodes, _state); - ae_vector_init_copy(&dst->splits, &src->splits, _state); - ae_vector_init_copy(&dst->x, &src->x, _state); - dst->kneeded = src->kneeded; - dst->rneeded = src->rneeded; - dst->selfmatch = src->selfmatch; - dst->approxf = src->approxf; - dst->kcur = src->kcur; - ae_vector_init_copy(&dst->idx, &src->idx, _state); - ae_vector_init_copy(&dst->r, &src->r, _state); - ae_vector_init_copy(&dst->buf, &src->buf, _state); - ae_vector_init_copy(&dst->curboxmin, &src->curboxmin, _state); - ae_vector_init_copy(&dst->curboxmax, &src->curboxmax, _state); - dst->curdist = src->curdist; - dst->debugcounter = src->debugcounter; + hqrndstate *dst = (hqrndstate*)_dst; + hqrndstate *src = (hqrndstate*)_src; + dst->s1 = src->s1; + dst->s2 = src->s2; + dst->magicv = src->magicv; } -void _kdtree_clear(void* _p) +void _hqrndstate_clear(void* _p) { - kdtree *p = (kdtree*)_p; + hqrndstate *p = (hqrndstate*)_p; ae_touch_ptr((void*)p); - ae_matrix_clear(&p->xy); - ae_vector_clear(&p->tags); - ae_vector_clear(&p->boxmin); - ae_vector_clear(&p->boxmax); - ae_vector_clear(&p->nodes); - ae_vector_clear(&p->splits); - ae_vector_clear(&p->x); - ae_vector_clear(&p->idx); - ae_vector_clear(&p->r); - ae_vector_clear(&p->buf); - ae_vector_clear(&p->curboxmin); - ae_vector_clear(&p->curboxmax); } -void _kdtree_destroy(void* _p) +void _hqrndstate_destroy(void* _p) { - kdtree *p = (kdtree*)_p; + hqrndstate *p = (hqrndstate*)_p; ae_touch_ptr((void*)p); - ae_matrix_destroy(&p->xy); - ae_vector_destroy(&p->tags); - ae_vector_destroy(&p->boxmin); - ae_vector_destroy(&p->boxmax); - ae_vector_destroy(&p->nodes); - ae_vector_destroy(&p->splits); - ae_vector_destroy(&p->x); - ae_vector_destroy(&p->idx); - ae_vector_destroy(&p->r); - ae_vector_destroy(&p->buf); - ae_vector_destroy(&p->curboxmin); - ae_vector_destroy(&p->curboxmax); } +#endif +#if defined(AE_COMPILE_XDEBUG) || !defined(AE_PARTIAL_BUILD) /************************************************************************* @@ -4613,7 +7630,8 @@ ae_vector b; ae_frame_make(_state, &_frame_block); - ae_vector_init(&b, 0, DT_BOOL, _state); + memset(&b, 0, sizeof(b)); + ae_vector_init(&b, 0, DT_BOOL, _state, ae_true); ae_vector_set_length(&b, a->cnt, _state); for(i=0; i<=b.cnt-1; i++) @@ -4718,7 +7736,8 @@ ae_vector b; ae_frame_make(_state, &_frame_block); - ae_vector_init(&b, 0, DT_INT, _state); + memset(&b, 0, sizeof(b)); + ae_vector_init(&b, 0, DT_INT, _state, ae_true); ae_vector_set_length(&b, a->cnt, _state); for(i=0; i<=b.cnt-1; i++) @@ -4832,7 +7851,8 @@ ae_vector b; ae_frame_make(_state, &_frame_block); - ae_vector_init(&b, 0, DT_REAL, _state); + memset(&b, 0, sizeof(b)); + ae_vector_init(&b, 0, DT_REAL, _state, ae_true); ae_vector_set_length(&b, a->cnt, 
_state); for(i=0; i<=b.cnt-1; i++) @@ -4946,7 +7966,8 @@ ae_vector b; ae_frame_make(_state, &_frame_block); - ae_vector_init(&b, 0, DT_COMPLEX, _state); + memset(&b, 0, sizeof(b)); + ae_vector_init(&b, 0, DT_COMPLEX, _state, ae_true); ae_vector_set_length(&b, a->cnt, _state); for(i=0; i<=b.cnt-1; i++) @@ -5073,7 +8094,8 @@ ae_matrix b; ae_frame_make(_state, &_frame_block); - ae_matrix_init(&b, 0, 0, DT_BOOL, _state); + memset(&b, 0, sizeof(b)); + ae_matrix_init(&b, 0, 0, DT_BOOL, _state, ae_true); ae_matrix_set_length(&b, a->rows, a->cols, _state); for(i=0; i<=b.rows-1; i++) @@ -5198,7 +8220,8 @@ ae_matrix b; ae_frame_make(_state, &_frame_block); - ae_matrix_init(&b, 0, 0, DT_INT, _state); + memset(&b, 0, sizeof(b)); + ae_matrix_init(&b, 0, 0, DT_INT, _state, ae_true); ae_matrix_set_length(&b, a->rows, a->cols, _state); for(i=0; i<=b.rows-1; i++) @@ -5323,7 +8346,8 @@ ae_matrix b; ae_frame_make(_state, &_frame_block); - ae_matrix_init(&b, 0, 0, DT_REAL, _state); + memset(&b, 0, sizeof(b)); + ae_matrix_init(&b, 0, 0, DT_REAL, _state, ae_true); ae_matrix_set_length(&b, a->rows, a->cols, _state); for(i=0; i<=b.rows-1; i++) @@ -5448,7 +8472,8 @@ ae_matrix b; ae_frame_make(_state, &_frame_block); - ae_matrix_init(&b, 0, 0, DT_COMPLEX, _state); + memset(&b, 0, sizeof(b)); + ae_matrix_init(&b, 0, 0, DT_COMPLEX, _state, ae_true); ae_matrix_set_length(&b, a->rows, a->cols, _state); for(i=0; i<=b.rows-1; i++) @@ -5544,21 +8569,21 @@ } -void _xdebugrecord1_init(void* _p, ae_state *_state) +void _xdebugrecord1_init(void* _p, ae_state *_state, ae_bool make_automatic) { xdebugrecord1 *p = (xdebugrecord1*)_p; ae_touch_ptr((void*)p); - ae_vector_init(&p->a, 0, DT_REAL, _state); + ae_vector_init(&p->a, 0, DT_REAL, _state, make_automatic); } -void _xdebugrecord1_init_copy(void* _dst, void* _src, ae_state *_state) +void _xdebugrecord1_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { xdebugrecord1 *dst = (xdebugrecord1*)_dst; xdebugrecord1 *src = (xdebugrecord1*)_src; dst->i = src->i; dst->c = src->c; - ae_vector_init_copy(&dst->a, &src->a, _state); + ae_vector_init_copy(&dst->a, &src->a, _state, make_automatic); } @@ -5578,6 +8603,7 @@ } +#endif } diff -Nru alglib-3.10.0/src/alglibmisc.h alglib-3.16.0/src/alglibmisc.h --- alglib-3.10.0/src/alglibmisc.h 2015-08-19 12:24:22.000000000 +0000 +++ alglib-3.16.0/src/alglibmisc.h 2019-12-19 10:28:27.000000000 +0000 @@ -1,5 +1,5 @@ /************************************************************************* -ALGLIB 3.10.0 (source code generated 2015-08-19) +ALGLIB 3.16.0 (source code generated 2019-12-19) Copyright (c) Sergey Bochkanov (ALGLIB project). 
>>> SOURCE LICENSE >>> @@ -29,25 +29,12 @@ ///////////////////////////////////////////////////////////////////////// namespace alglib_impl { +#if defined(AE_COMPILE_NEARESTNEIGHBOR) || !defined(AE_PARTIAL_BUILD) typedef struct { - ae_int_t s1; - ae_int_t s2; - ae_int_t magicv; -} hqrndstate; -typedef struct -{ - ae_int_t n; - ae_int_t nx; - ae_int_t ny; - ae_int_t normtype; - ae_matrix xy; - ae_vector tags; + ae_vector x; ae_vector boxmin; ae_vector boxmax; - ae_vector nodes; - ae_vector splits; - ae_vector x; ae_int_t kneeded; double rneeded; ae_bool selfmatch; @@ -59,14 +46,39 @@ ae_vector curboxmin; ae_vector curboxmax; double curdist; +} kdtreerequestbuffer; +typedef struct +{ + ae_int_t n; + ae_int_t nx; + ae_int_t ny; + ae_int_t normtype; + ae_matrix xy; + ae_vector tags; + ae_vector boxmin; + ae_vector boxmax; + ae_vector nodes; + ae_vector splits; + kdtreerequestbuffer innerbuf; ae_int_t debugcounter; } kdtree; +#endif +#if defined(AE_COMPILE_HQRND) || !defined(AE_PARTIAL_BUILD) +typedef struct +{ + ae_int_t s1; + ae_int_t s2; + ae_int_t magicv; +} hqrndstate; +#endif +#if defined(AE_COMPILE_XDEBUG) || !defined(AE_PARTIAL_BUILD) typedef struct { ae_int_t i; ae_complex c; ae_vector a; } xdebugrecord1; +#endif } @@ -78,40 +90,38 @@ namespace alglib { +#if defined(AE_COMPILE_NEARESTNEIGHBOR) || !defined(AE_PARTIAL_BUILD) /************************************************************************* -Portable high quality random number generator state. -Initialized with HQRNDRandomize() or HQRNDSeed(). +Buffer object which is used to perform nearest neighbor requests in the +multithreaded mode (multiple threads working with same KD-tree object). -Fields: - S1, S2 - seed values - V - precomputed value - MagicV - 'magic' value used to determine whether State structure - was correctly initialized. +This object should be created with KDTreeCreateRequestBuffer(). *************************************************************************/ -class _hqrndstate_owner +class _kdtreerequestbuffer_owner { public: - _hqrndstate_owner(); - _hqrndstate_owner(const _hqrndstate_owner &rhs); - _hqrndstate_owner& operator=(const _hqrndstate_owner &rhs); - virtual ~_hqrndstate_owner(); - alglib_impl::hqrndstate* c_ptr(); - alglib_impl::hqrndstate* c_ptr() const; + _kdtreerequestbuffer_owner(); + _kdtreerequestbuffer_owner(const _kdtreerequestbuffer_owner &rhs); + _kdtreerequestbuffer_owner& operator=(const _kdtreerequestbuffer_owner &rhs); + virtual ~_kdtreerequestbuffer_owner(); + alglib_impl::kdtreerequestbuffer* c_ptr(); + alglib_impl::kdtreerequestbuffer* c_ptr() const; protected: - alglib_impl::hqrndstate *p_struct; + alglib_impl::kdtreerequestbuffer *p_struct; }; -class hqrndstate : public _hqrndstate_owner +class kdtreerequestbuffer : public _kdtreerequestbuffer_owner { public: - hqrndstate(); - hqrndstate(const hqrndstate &rhs); - hqrndstate& operator=(const hqrndstate &rhs); - virtual ~hqrndstate(); + kdtreerequestbuffer(); + kdtreerequestbuffer(const kdtreerequestbuffer &rhs); + kdtreerequestbuffer& operator=(const kdtreerequestbuffer &rhs); + virtual ~kdtreerequestbuffer(); }; -/************************************************************************* +/************************************************************************* +KD-tree object. 
*************************************************************************/ class _kdtree_owner { @@ -134,7 +144,43 @@ virtual ~kdtree(); }; +#endif + +#if defined(AE_COMPILE_HQRND) || !defined(AE_PARTIAL_BUILD) +/************************************************************************* +Portable high quality random number generator state. +Initialized with HQRNDRandomize() or HQRNDSeed(). + +Fields: + S1, S2 - seed values + V - precomputed value + MagicV - 'magic' value used to determine whether State structure + was correctly initialized. +*************************************************************************/ +class _hqrndstate_owner +{ +public: + _hqrndstate_owner(); + _hqrndstate_owner(const _hqrndstate_owner &rhs); + _hqrndstate_owner& operator=(const _hqrndstate_owner &rhs); + virtual ~_hqrndstate_owner(); + alglib_impl::hqrndstate* c_ptr(); + alglib_impl::hqrndstate* c_ptr() const; +protected: + alglib_impl::hqrndstate *p_struct; +}; +class hqrndstate : public _hqrndstate_owner +{ +public: + hqrndstate(); + hqrndstate(const hqrndstate &rhs); + hqrndstate& operator=(const hqrndstate &rhs); + virtual ~hqrndstate(); + +}; +#endif +#if defined(AE_COMPILE_XDEBUG) || !defined(AE_PARTIAL_BUILD) /************************************************************************* *************************************************************************/ @@ -162,143 +208,9 @@ real_1d_array a; }; +#endif -/************************************************************************* -HQRNDState initialization with random values which come from standard -RNG. - - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -void hqrndrandomize(hqrndstate &state); - - -/************************************************************************* -HQRNDState initialization with seed values - - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -void hqrndseed(const ae_int_t s1, const ae_int_t s2, hqrndstate &state); - - -/************************************************************************* -This function generates random real number in (0,1), -not including interval boundaries - -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). - - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -double hqrnduniformr(const hqrndstate &state); - - -/************************************************************************* -This function generates random integer number in [0, N) - -1. State structure must be initialized with HQRNDRandomize() or HQRNDSeed() -2. N can be any positive number except for very large numbers: - * close to 2^31 on 32-bit systems - * close to 2^62 on 64-bit systems - An exception will be generated if N is too large. - - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -ae_int_t hqrnduniformi(const hqrndstate &state, const ae_int_t n); - - -/************************************************************************* -Random number generator: normal numbers - -This function generates one random number from normal distribution. -Its performance is equal to that of HQRNDNormal2() - -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). 
- - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -double hqrndnormal(const hqrndstate &state); - - -/************************************************************************* -Random number generator: random X and Y such that X^2+Y^2=1 - -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). - - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -void hqrndunit2(const hqrndstate &state, double &x, double &y); - - -/************************************************************************* -Random number generator: normal numbers - -This function generates two independent random numbers from normal -distribution. Its performance is equal to that of HQRNDNormal() - -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). - - -- ALGLIB -- - Copyright 02.12.2009 by Bochkanov Sergey -*************************************************************************/ -void hqrndnormal2(const hqrndstate &state, double &x1, double &x2); - - -/************************************************************************* -Random number generator: exponential distribution - -State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). - - -- ALGLIB -- - Copyright 11.08.2007 by Bochkanov Sergey -*************************************************************************/ -double hqrndexponential(const hqrndstate &state, const double lambdav); - - -/************************************************************************* -This function generates random number from discrete distribution given by -finite sample X. - -INPUT PARAMETERS - State - high quality random number generator, must be - initialized with HQRNDRandomize() or HQRNDSeed(). - X - finite sample - N - number of elements to use, N>=1 - -RESULT - this function returns one of the X[i] for random i=0..N-1 - - -- ALGLIB -- - Copyright 08.11.2011 by Bochkanov Sergey -*************************************************************************/ -double hqrnddiscrete(const hqrndstate &state, const real_1d_array &x, const ae_int_t n); - - -/************************************************************************* -This function generates random number from continuous distribution given -by finite sample X. - -INPUT PARAMETERS - State - high quality random number generator, must be - initialized with HQRNDRandomize() or HQRNDSeed(). - X - finite sample, array[N] (can be larger, in this case only - leading N elements are used). THIS ARRAY MUST BE SORTED BY - ASCENDING. - N - number of elements to use, N>=1 - -RESULT - this function returns random number from continuous distribution which - tries to approximate X as mush as possible. min(X)<=Result<=max(X). - - -- ALGLIB -- - Copyright 08.11.2011 by Bochkanov Sergey -*************************************************************************/ -double hqrndcontinuous(const hqrndstate &state, const real_1d_array &x, const ae_int_t n); - +#if defined(AE_COMPILE_NEARESTNEIGHBOR) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This function serializes data structure to string. @@ -325,7 +237,29 @@ /************************************************************************* This function unserializes data structure from string. 
*************************************************************************/ -void kdtreeunserialize(std::string &s_in, kdtree &obj); +void kdtreeunserialize(const std::string &s_in, kdtree &obj); + + + + +/************************************************************************* +This function serializes data structure to C++ stream. + +Data stream generated by this function is same as string representation +generated by string version of serializer - alphanumeric characters, +dots, underscores, minus signs, which are grouped into words separated by +spaces and CR+LF. + +We recommend you to read comments on string version of serializer to find +out more about serialization of AlGLIB objects. +*************************************************************************/ +void kdtreeserialize(kdtree &obj, std::ostream &s_out); + + +/************************************************************************* +This function unserializes data structure from stream. +*************************************************************************/ +void kdtreeunserialize(const std::istream &s_in, kdtree &obj); /************************************************************************* @@ -363,8 +297,8 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreebuild(const real_2d_array &xy, const ae_int_t n, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt); -void kdtreebuild(const real_2d_array &xy, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt); +void kdtreebuild(const real_2d_array &xy, const ae_int_t n, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt, const xparams _xparams = alglib::xdefault); +void kdtreebuild(const real_2d_array &xy, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -404,13 +338,49 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreebuildtagged(const real_2d_array &xy, const integer_1d_array &tags, const ae_int_t n, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt); -void kdtreebuildtagged(const real_2d_array &xy, const integer_1d_array &tags, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt); +void kdtreebuildtagged(const real_2d_array &xy, const integer_1d_array &tags, const ae_int_t n, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt, const xparams _xparams = alglib::xdefault); +void kdtreebuildtagged(const real_2d_array &xy, const integer_1d_array &tags, const ae_int_t nx, const ae_int_t ny, const ae_int_t normtype, kdtree &kdt, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +This function creates buffer structure which can be used to perform +parallel KD-tree requests. + +KD-tree subpackage provides two sets of request functions - ones which use +internal buffer of KD-tree object (these functions are single-threaded +because they use same buffer, which can not shared between threads), and +ones which use external buffer. + +This function is used to initialize external buffer. + +INPUT PARAMETERS + KDT - KD-tree which is associated with newly created buffer + +OUTPUT PARAMETERS + Buf - external buffer. 
+ + +IMPORTANT: KD-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use buffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. + + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +void kdtreecreaterequestbuffer(const kdtree &kdt, kdtreerequestbuffer &buf, const xparams _xparams = alglib::xdefault); /************************************************************************* K-NN query: K nearest neighbors +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryKNN() ("Ts" stands for "thread-safe"). + INPUT PARAMETERS KDT - KD-tree X - point, array[0..NX-1]. @@ -436,17 +406,24 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -ae_int_t kdtreequeryknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const bool selfmatch); -ae_int_t kdtreequeryknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k); +ae_int_t kdtreequeryknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const bool selfmatch, const xparams _xparams = alglib::xdefault); +ae_int_t kdtreequeryknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const xparams _xparams = alglib::xdefault); /************************************************************************* -R-NN query: all points within R-sphere centered at X +K-NN query: K nearest neighbors, using external thread-local buffer. + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. INPUT PARAMETERS - KDT - KD-tree + KDT - kd-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. X - point, array[0..NX-1]. - R - radius of sphere (in corresponding norm), R>0 + K - number of neighbors to return, K>=1 SelfMatch - whether self-matches are allowed: * if True, nearest neighbor may be the point itself (if it exists in original dataset) @@ -455,12 +432,61 @@ * if not given, considered True RESULT - number of neighbors found, >=0 + number of actual neighbors found (either K or N, if K>N). This subroutine performs query and stores its result in the internal -structures of the KD-tree. You can use following subroutines to obtain -actual results: -* KDTreeQueryResultsX() to get X-values +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. 
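To make the calling pattern of the buffered K-NN query concrete, here is a minimal single-threaded C++ sketch; in real use each worker thread would own its own kdtreerequestbuffer while sharing the read-only kdtree. The dataset, the query point and the exact signature of the kdtreetsqueryresultsx() result getter (only its name appears in the documentation above) are assumptions made for illustration:

    #include "alglibmisc.h"
    #include <iostream>

    int main()
    {
        // 4 points in 2D, no Y-values, normtype=2 (2-norm).
        alglib::real_2d_array xy = "[[0,0],[0,1],[1,0],[1,1]]";
        alglib::kdtree kdt;
        alglib::kdtreebuild(xy, 2, 0, 2, kdt);

        // One request buffer per thread, created from the same tree.
        alglib::kdtreerequestbuffer buf;
        alglib::kdtreecreaterequestbuffer(kdt, buf);

        alglib::real_1d_array x = "[0.9, 0.9]";
        alglib::ae_int_t k = alglib::kdtreetsqueryknn(kdt, buf, x, 1);   // 1 nearest neighbor

        alglib::real_2d_array nn;
        alglib::kdtreetsqueryresultsx(kdt, buf, nn);   // signature assumed, see note above
        std::cout << k << " neighbor(s), nearest = [" << nn[0][0] << "," << nn[0][1] << "]\n";
        return 0;
    }

The same pattern applies to the other Ts* entry points below: build the tree once, create one request buffer per thread with kdtreecreaterequestbuffer(), and keep each thread's query results strictly inside that thread's buffer.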
+ + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreetsqueryknn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const ae_int_t k, const bool selfmatch, const xparams _xparams = alglib::xdefault); +ae_int_t kdtreetsqueryknn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const ae_int_t k, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +R-NN query: all points within R-sphere centered at X, ordered by distance +between point and X (by ascending). + +NOTE: it is also possible to perform undordered queries performed by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. Such queries + are faster because we do not have to use heap structure for sorting. + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: kdtreetsqueryrnn() ("Ts" stands for "thread-safe"). + +INPUT PARAMETERS + KDT - KD-tree + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +actual results: +* KDTreeQueryResultsX() to get X-values * KDTreeQueryResultsXY() to get X- and Y-values * KDTreeQueryResultsTags() to get tag values * KDTreeQueryResultsDistances() to get distances @@ -468,13 +494,160 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -ae_int_t kdtreequeryrnn(const kdtree &kdt, const real_1d_array &x, const double r, const bool selfmatch); -ae_int_t kdtreequeryrnn(const kdtree &kdt, const real_1d_array &x, const double r); +ae_int_t kdtreequeryrnn(const kdtree &kdt, const real_1d_array &x, const double r, const bool selfmatch, const xparams _xparams = alglib::xdefault); +ae_int_t kdtreequeryrnn(const kdtree &kdt, const real_1d_array &x, const double r, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +R-NN query: all points within R-sphere centered at X, no ordering by +distance as undicated by "U" suffix (faster that ordered query, for large +queries - significantly faster). + +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: kdtreetsqueryrnn() ("Ts" stands for "thread-safe"). + +INPUT PARAMETERS + KDT - KD-tree + X - point, array[0..NX-1]. 
+ R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +actual results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances + +As indicated by "U" suffix, this function returns unordered results. + + -- ALGLIB -- + Copyright 01.11.2018 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreequeryrnnu(const kdtree &kdt, const real_1d_array &x, const double r, const bool selfmatch, const xparams _xparams = alglib::xdefault); +ae_int_t kdtreequeryrnnu(const kdtree &kdt, const real_1d_array &x, const double r, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, sorted by distance between point and X (by ascending) + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +NOTE: it is also possible to perform undordered queries performed by means + of kdtreequeryrnnu() and kdtreetsqueryrnnu() functions. Such queries + are faster because we do not have to use heap structure for sorting. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. 
+ + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreetsqueryrnn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const double r, const bool selfmatch, const xparams _xparams = alglib::xdefault); +ae_int_t kdtreetsqueryrnn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const double r, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +R-NN query: all points within R-sphere centered at X, using external +thread-local buffer, no ordering by distance as undicated by "U" suffix +(faster that ordered query, for large queries - significantly faster). + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + R - radius of sphere (in corresponding norm), R>0 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + +RESULT + number of neighbors found, >=0 + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +As indicated by "U" suffix, this function returns unordered results. + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. + + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreetsqueryrnnu(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const double r, const bool selfmatch, const xparams _xparams = alglib::xdefault); +ae_int_t kdtreetsqueryrnnu(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const double r, const xparams _xparams = alglib::xdefault); /************************************************************************* K-NN query: approximate K nearest neighbors +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryAKNN() ("Ts" stands for "thread-safe"). + INPUT PARAMETERS KDT - KD-tree X - point, array[0..NX-1]. @@ -489,33 +662,317 @@ neighbor is a neighbor whose distance from X is at most (1+eps) times distance of true nearest neighbor. -RESULT - number of actual neighbors found (either K or N, if K>N). +RESULT + number of actual neighbors found (either K or N, if K>N). 
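
EXAMPLE (illustrative sketch, not part of the upstream documentation; it
assumes the alglibmisc.h header shipped by this package and the
kdtreebuild() wrapper declared earlier in this header):

    #include "alglibmisc.h"
    #include <cstdio>

    int main()
    {
        alglib::real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
        alglib::kdtree kdt;
        alglib::kdtreebuild(a, 2, 0, 2, kdt);

        alglib::real_1d_array x = "[-1,0]";
        alglib::real_2d_array r = "[[]]";

        // 2 approximate nearest neighbors; with eps=1.0 a returned point may be
        // up to (1+eps)=2 times farther away than the true nearest neighbor
        alglib::ae_int_t k = alglib::kdtreequeryaknn(kdt, x, 2, 1.0);
        alglib::kdtreequeryresultsx(kdt, r);
        std::printf("%d neighbor(s) found\n", (int)k);
        return 0;
    }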
+ +NOTES + significant performance gain may be achieved only when Eps is is on + the order of magnitude of 1 or larger. + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +these results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() to get distances + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreequeryaknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const bool selfmatch, const double eps, const xparams _xparams = alglib::xdefault); +ae_int_t kdtreequeryaknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const double eps, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +K-NN query: approximate K nearest neighbors, using thread-local buffer. + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + X - point, array[0..NX-1]. + K - number of neighbors to return, K>=1 + SelfMatch - whether self-matches are allowed: + * if True, nearest neighbor may be the point itself + (if it exists in original dataset) + * if False, then only points with non-zero distance + are returned + * if not given, considered True + Eps - approximation factor, Eps>=0. eps-approximate nearest + neighbor is a neighbor whose distance from X is at + most (1+eps) times distance of true nearest neighbor. + +RESULT + number of actual neighbors found (either K or N, if K>N). + +NOTES + significant performance gain may be achieved only when Eps is is on + the order of magnitude of 1 or larger. + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "buf" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() to get distances + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. + + -- ALGLIB -- + Copyright 18.03.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreetsqueryaknn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const ae_int_t k, const bool selfmatch, const double eps, const xparams _xparams = alglib::xdefault); +ae_int_t kdtreetsqueryaknn(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &x, const ae_int_t k, const double eps, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +Box query: all points within user-specified box. 
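
A short usage sketch follows (illustrative, not part of the upstream
documentation; it assumes the alglibmisc.h header shipped by this package
and the kdtreebuild() wrapper declared earlier in this header); the formal
parameter description is given below:

    #include "alglibmisc.h"
    #include <cstdio>

    int main()
    {
        alglib::real_2d_array a = "[[0,0],[0,1],[1,0],[1,1]]";
        alglib::kdtree kdt;
        alglib::kdtreebuild(a, 2, 0, 2, kdt);

        // all points with 0.5<=x0<=1.5 and -0.5<=x1<=0.5 (only [1,0] qualifies)
        alglib::real_1d_array boxmin = "[0.5,-0.5]";
        alglib::real_1d_array boxmax = "[1.5,0.5]";
        alglib::ae_int_t cnt = alglib::kdtreequerybox(kdt, boxmin, boxmax);

        alglib::real_2d_array r = "[[]]";
        alglib::real_1d_array d = "[]";
        alglib::kdtreequeryresultsx(kdt, r);          // the matching points
        alglib::kdtreequeryresultsdistances(kdt, d);  // zeros for a box query
        std::printf("%d point(s) inside the box\n", (int)cnt);
        return 0;
    }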
+ +IMPORTANT: this function can not be used in multithreaded code because it + uses internal temporary buffer of kd-tree object, which can not + be shared between multiple threads. If you want to perform + parallel requests, use function which uses external request + buffer: KDTreeTsQueryBox() ("Ts" stands for "thread-safe"). + +INPUT PARAMETERS + KDT - KD-tree + BoxMin - lower bounds, array[0..NX-1]. + BoxMax - upper bounds, array[0..NX-1]. + + +RESULT + number of actual neighbors found (in [0,N]). + +This subroutine performs query and stores its result in the internal +structures of the KD-tree. You can use following subroutines to obtain +these results: +* KDTreeQueryResultsX() to get X-values +* KDTreeQueryResultsXY() to get X- and Y-values +* KDTreeQueryResultsTags() to get tag values +* KDTreeQueryResultsDistances() returns zeros for this request + +NOTE: this particular query returns unordered results, because there is no + meaningful way of ordering points. Furthermore, no 'distance' is + associated with points - it is either INSIDE or OUTSIDE (so request + for distances will return zeros). + + -- ALGLIB -- + Copyright 14.05.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreequerybox(const kdtree &kdt, const real_1d_array &boxmin, const real_1d_array &boxmax, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +Box query: all points within user-specified box, using thread-local buffer. + +You can call this function from multiple threads for same kd-tree instance, +assuming that different instances of buffer object are passed to different +threads. + +INPUT PARAMETERS + KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure with kdtreecreaterequestbuffer() + function. + BoxMin - lower bounds, array[0..NX-1]. + BoxMax - upper bounds, array[0..NX-1]. + +RESULT + number of actual neighbors found (in [0,N]). + +This subroutine performs query and stores its result in the internal +structures of the buffer object. You can use following subroutines to +obtain these results (pay attention to "ts" in their names): +* KDTreeTsQueryResultsX() to get X-values +* KDTreeTsQueryResultsXY() to get X- and Y-values +* KDTreeTsQueryResultsTags() to get tag values +* KDTreeTsQueryResultsDistances() returns zeros for this query + +NOTE: this particular query returns unordered results, because there is no + meaningful way of ordering points. Furthermore, no 'distance' is + associated with points - it is either INSIDE or OUTSIDE (so request + for distances will return zeros). + +IMPORTANT: kd-tree buffer should be used only with KD-tree object which + was used to initialize buffer. Any attempt to use biffer with + different object is dangerous - you may get integrity check + failure (exception) because sizes of internal arrays do not fit + to dimensions of KD-tree structure. + + -- ALGLIB -- + Copyright 14.05.2016 by Bochkanov Sergey +*************************************************************************/ +ae_int_t kdtreetsquerybox(const kdtree &kdt, const kdtreerequestbuffer &buf, const real_1d_array &boxmin, const real_1d_array &boxmax, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +X-values from last query. + +This function retuns results stored in the internal buffer of kd-tree +object. 
If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsx(). + +INPUT PARAMETERS + KDT - KD-tree + X - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + X - rows are filled with X-values + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +void kdtreequeryresultsx(const kdtree &kdt, real_2d_array &x, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +X- and Y-values from last query + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsxy(). + +INPUT PARAMETERS + KDT - KD-tree + XY - possibly pre-allocated buffer. If XY is too small to store + result, it is resized. If size(XY) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + XY - rows are filled with points: first NX columns with + X-values, next NY columns - with Y-values. + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsTags() tag values +* KDTreeQueryResultsDistances() distances + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +void kdtreequeryresultsxy(const kdtree &kdt, real_2d_array &xy, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +Tags from last query + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultstags(). + +INPUT PARAMETERS + KDT - KD-tree + Tags - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + Tags - filled with tags associated with points, + or, when no tags were supplied, with zeros + +NOTES +1. points are ordered by distance from the query point (first = closest) +2. 
if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. + +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsDistances() distances + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +void kdtreequeryresultstags(const kdtree &kdt, integer_1d_array &tags, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +Distances from last query + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - kdtreetsqueryresultsdistances(). + +INPUT PARAMETERS + KDT - KD-tree + R - possibly pre-allocated buffer. If X is too small to store + result, it is resized. If size(X) is enough to store + result, it is left unchanged. + +OUTPUT PARAMETERS + R - filled with distances (in corresponding norm) NOTES - significant performance gain may be achieved only when Eps is is on - the order of magnitude of 1 or larger. +1. points are ordered by distance from the query point (first = closest) +2. if XY is larger than required to store result, only leading part will + be overwritten; trailing part will be left unchanged. So if on input + XY = [[A,B],[C,D]], and result is [1,2], then on exit we will get + XY = [[1,2],[C,D]]. This is done purposely to increase performance; if + you want function to resize array according to result size, use + function with same name and suffix 'I'. -This subroutine performs query and stores its result in the internal -structures of the KD-tree. You can use following subroutines to obtain -these results: -* KDTreeQueryResultsX() to get X-values -* KDTreeQueryResultsXY() to get X- and Y-values -* KDTreeQueryResultsTags() to get tag values -* KDTreeQueryResultsDistances() to get distances +SEE ALSO +* KDTreeQueryResultsX() X-values +* KDTreeQueryResultsXY() X- and Y-values +* KDTreeQueryResultsTags() tag values -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -ae_int_t kdtreequeryaknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const bool selfmatch, const double eps); -ae_int_t kdtreequeryaknn(const kdtree &kdt, const real_1d_array &x, const ae_int_t k, const double eps); +void kdtreequeryresultsdistances(const kdtree &kdt, real_1d_array &r, const xparams _xparams = alglib::xdefault); /************************************************************************* -X-values from last query +X-values from last query associated with kdtreerequestbuffer object. INPUT PARAMETERS KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. X - possibly pre-allocated buffer. If X is too small to store result, it is resized. If size(X) is enough to store result, it is left unchanged. 
@@ -540,14 +997,16 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsx(const kdtree &kdt, real_2d_array &x); +void kdtreetsqueryresultsx(const kdtree &kdt, const kdtreerequestbuffer &buf, real_2d_array &x, const xparams _xparams = alglib::xdefault); /************************************************************************* -X- and Y-values from last query +X- and Y-values from last query associated with kdtreerequestbuffer object. INPUT PARAMETERS KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. XY - possibly pre-allocated buffer. If XY is too small to store result, it is resized. If size(XY) is enough to store result, it is left unchanged. @@ -573,14 +1032,21 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsxy(const kdtree &kdt, real_2d_array &xy); +void kdtreetsqueryresultsxy(const kdtree &kdt, const kdtreerequestbuffer &buf, real_2d_array &xy, const xparams _xparams = alglib::xdefault); /************************************************************************* -Tags from last query +Tags from last query associated with kdtreerequestbuffer object. + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - KDTreeTsqueryresultstags(). INPUT PARAMETERS KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. Tags - possibly pre-allocated buffer. If X is too small to store result, it is resized. If size(X) is enough to store result, it is left unchanged. @@ -606,14 +1072,21 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultstags(const kdtree &kdt, integer_1d_array &tags); +void kdtreetsqueryresultstags(const kdtree &kdt, const kdtreerequestbuffer &buf, integer_1d_array &tags, const xparams _xparams = alglib::xdefault); /************************************************************************* -Distances from last query +Distances from last query associated with kdtreerequestbuffer object. + +This function retuns results stored in the internal buffer of kd-tree +object. If you performed buffered requests (ones which use instances of +kdtreerequestbuffer class), you should call buffered version of this +function - KDTreeTsqueryresultsdistances(). INPUT PARAMETERS KDT - KD-tree + Buf - request buffer object created for this particular + instance of kd-tree structure. R - possibly pre-allocated buffer. If X is too small to store result, it is resized. If size(X) is enough to store result, it is left unchanged. 
@@ -638,7 +1111,7 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsdistances(const kdtree &kdt, real_1d_array &r); +void kdtreetsqueryresultsdistances(const kdtree &kdt, const kdtreerequestbuffer &buf, real_1d_array &r, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -653,7 +1126,7 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsxi(const kdtree &kdt, real_2d_array &x); +void kdtreequeryresultsxi(const kdtree &kdt, real_2d_array &x, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -668,7 +1141,7 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsxyi(const kdtree &kdt, real_2d_array &xy); +void kdtreequeryresultsxyi(const kdtree &kdt, real_2d_array &xy, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -683,7 +1156,7 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultstagsi(const kdtree &kdt, integer_1d_array &tags); +void kdtreequeryresultstagsi(const kdtree &kdt, integer_1d_array &tags, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -698,8 +1171,148 @@ -- ALGLIB -- Copyright 28.02.2010 by Bochkanov Sergey *************************************************************************/ -void kdtreequeryresultsdistancesi(const kdtree &kdt, real_1d_array &r); +void kdtreequeryresultsdistancesi(const kdtree &kdt, real_1d_array &r, const xparams _xparams = alglib::xdefault); +#endif + +#if defined(AE_COMPILE_HQRND) || !defined(AE_PARTIAL_BUILD) +/************************************************************************* +HQRNDState initialization with random values which come from standard +RNG. + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +void hqrndrandomize(hqrndstate &state, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +HQRNDState initialization with seed values + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +void hqrndseed(const ae_int_t s1, const ae_int_t s2, hqrndstate &state, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +This function generates random real number in (0,1), +not including interval boundaries + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +double hqrnduniformr(const hqrndstate &state, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +This function generates random integer number in [0, N) + +1. State structure must be initialized with HQRNDRandomize() or HQRNDSeed() +2. 
N can be any positive number except for very large numbers: + * close to 2^31 on 32-bit systems + * close to 2^62 on 64-bit systems + An exception will be generated if N is too large. + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +ae_int_t hqrnduniformi(const hqrndstate &state, const ae_int_t n, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +Random number generator: normal numbers + +This function generates one random number from normal distribution. +Its performance is equal to that of HQRNDNormal2() + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +double hqrndnormal(const hqrndstate &state, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +Random number generator: random X and Y such that X^2+Y^2=1 + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +void hqrndunit2(const hqrndstate &state, double &x, double &y, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +Random number generator: normal numbers + +This function generates two independent random numbers from normal +distribution. Its performance is equal to that of HQRNDNormal() + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). + + -- ALGLIB -- + Copyright 02.12.2009 by Bochkanov Sergey +*************************************************************************/ +void hqrndnormal2(const hqrndstate &state, double &x1, double &x2, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +Random number generator: exponential distribution + +State structure must be initialized with HQRNDRandomize() or HQRNDSeed(). + + -- ALGLIB -- + Copyright 11.08.2007 by Bochkanov Sergey +*************************************************************************/ +double hqrndexponential(const hqrndstate &state, const double lambdav, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +This function generates random number from discrete distribution given by +finite sample X. + +INPUT PARAMETERS + State - high quality random number generator, must be + initialized with HQRNDRandomize() or HQRNDSeed(). + X - finite sample + N - number of elements to use, N>=1 + +RESULT + this function returns one of the X[i] for random i=0..N-1 + + -- ALGLIB -- + Copyright 08.11.2011 by Bochkanov Sergey +*************************************************************************/ +double hqrnddiscrete(const hqrndstate &state, const real_1d_array &x, const ae_int_t n, const xparams _xparams = alglib::xdefault); + + +/************************************************************************* +This function generates random number from continuous distribution given +by finite sample X. + +INPUT PARAMETERS + State - high quality random number generator, must be + initialized with HQRNDRandomize() or HQRNDSeed(). 
+ X - finite sample, array[N] (can be larger, in this case only + leading N elements are used). THIS ARRAY MUST BE SORTED BY + ASCENDING. + N - number of elements to use, N>=1 + +RESULT + this function returns random number from continuous distribution which + tries to approximate X as mush as possible. min(X)<=Result<=max(X). + + -- ALGLIB -- + Copyright 08.11.2011 by Bochkanov Sergey +*************************************************************************/ +double hqrndcontinuous(const hqrndstate &state, const real_1d_array &x, const ae_int_t n, const xparams _xparams = alglib::xdefault); +#endif +#if defined(AE_COMPILE_XDEBUG) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This is debug function intended for testing ALGLIB interface generator. Never use it in any real life project. @@ -711,7 +1324,7 @@ -- ALGLIB -- Copyright 27.05.2014 by Bochkanov Sergey *************************************************************************/ -void xdebuginitrecord1(xdebugrecord1 &rec1); +void xdebuginitrecord1(xdebugrecord1 &rec1, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -723,7 +1336,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -ae_int_t xdebugb1count(const boolean_1d_array &a); +ae_int_t xdebugb1count(const boolean_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -736,7 +1349,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugb1not(const boolean_1d_array &a); +void xdebugb1not(const boolean_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -749,7 +1362,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugb1appendcopy(boolean_1d_array &a); +void xdebugb1appendcopy(boolean_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -762,7 +1375,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugb1outeven(const ae_int_t n, boolean_1d_array &a); +void xdebugb1outeven(const ae_int_t n, boolean_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -774,7 +1387,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -ae_int_t xdebugi1sum(const integer_1d_array &a); +ae_int_t xdebugi1sum(const integer_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -787,7 +1400,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugi1neg(const integer_1d_array &a); +void xdebugi1neg(const integer_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -800,7 +1413,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey 
*************************************************************************/ -void xdebugi1appendcopy(integer_1d_array &a); +void xdebugi1appendcopy(integer_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -815,7 +1428,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugi1outeven(const ae_int_t n, integer_1d_array &a); +void xdebugi1outeven(const ae_int_t n, integer_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -827,7 +1440,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -double xdebugr1sum(const real_1d_array &a); +double xdebugr1sum(const real_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -840,7 +1453,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugr1neg(const real_1d_array &a); +void xdebugr1neg(const real_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -853,7 +1466,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugr1appendcopy(real_1d_array &a); +void xdebugr1appendcopy(real_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -868,7 +1481,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugr1outeven(const ae_int_t n, real_1d_array &a); +void xdebugr1outeven(const ae_int_t n, real_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -880,7 +1493,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -alglib::complex xdebugc1sum(const complex_1d_array &a); +alglib::complex xdebugc1sum(const complex_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -893,7 +1506,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugc1neg(const complex_1d_array &a); +void xdebugc1neg(const complex_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -906,7 +1519,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugc1appendcopy(complex_1d_array &a); +void xdebugc1appendcopy(complex_1d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -921,7 +1534,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugc1outeven(const ae_int_t n, complex_1d_array &a); +void xdebugc1outeven(const ae_int_t n, complex_1d_array &a, const xparams _xparams = alglib::xdefault); 
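
/*************************************************************************
Usage sketch for the HQRND generator functions declared in the section
above (illustrative, not part of the upstream documentation; it assumes
the alglibmisc.h header shipped by this package):

    #include "alglibmisc.h"
    #include <cstdio>

    int main()
    {
        alglib::hqrndstate s;
        alglib::hqrndseed(11, 42, s);                 // reproducible stream; use hqrndrandomize(s) for a random seed

        double u = alglib::hqrnduniformr(s);          // uniform in (0,1)
        alglib::ae_int_t i = alglib::hqrnduniformi(s, 10);  // integer in [0,10)
        double n = alglib::hqrndnormal(s);            // standard normal deviate
        double e = alglib::hqrndexponential(s, 2.0);  // exponential with lambda=2

        // the sample must be sorted ascending for hqrndcontinuous()
        alglib::real_1d_array sample = "[0.0,0.5,1.0,4.0]";
        double d = alglib::hqrnddiscrete(s, sample, 4);    // one of the sample values
        double c = alglib::hqrndcontinuous(s, sample, 4);  // in [min(sample),max(sample)]

        std::printf("%f %d %f %f %f %f\n", u, (int)i, n, e, d, c);
        return 0;
    }
*************************************************************************/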
/************************************************************************* @@ -933,7 +1546,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -ae_int_t xdebugb2count(const boolean_2d_array &a); +ae_int_t xdebugb2count(const boolean_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -946,7 +1559,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugb2not(const boolean_2d_array &a); +void xdebugb2not(const boolean_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -959,7 +1572,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugb2transpose(boolean_2d_array &a); +void xdebugb2transpose(boolean_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -972,7 +1585,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugb2outsin(const ae_int_t m, const ae_int_t n, boolean_2d_array &a); +void xdebugb2outsin(const ae_int_t m, const ae_int_t n, boolean_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -984,7 +1597,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -ae_int_t xdebugi2sum(const integer_2d_array &a); +ae_int_t xdebugi2sum(const integer_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -997,7 +1610,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugi2neg(const integer_2d_array &a); +void xdebugi2neg(const integer_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1010,7 +1623,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugi2transpose(integer_2d_array &a); +void xdebugi2transpose(integer_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1023,7 +1636,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugi2outsin(const ae_int_t m, const ae_int_t n, integer_2d_array &a); +void xdebugi2outsin(const ae_int_t m, const ae_int_t n, integer_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1035,7 +1648,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -double xdebugr2sum(const real_2d_array &a); +double xdebugr2sum(const real_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1048,7 +1661,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey 
*************************************************************************/ -void xdebugr2neg(const real_2d_array &a); +void xdebugr2neg(const real_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1061,7 +1674,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugr2transpose(real_2d_array &a); +void xdebugr2transpose(real_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1074,7 +1687,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugr2outsin(const ae_int_t m, const ae_int_t n, real_2d_array &a); +void xdebugr2outsin(const ae_int_t m, const ae_int_t n, real_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1086,7 +1699,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -alglib::complex xdebugc2sum(const complex_2d_array &a); +alglib::complex xdebugc2sum(const complex_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1099,7 +1712,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugc2neg(const complex_2d_array &a); +void xdebugc2neg(const complex_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1112,7 +1725,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugc2transpose(complex_2d_array &a); +void xdebugc2transpose(complex_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1125,7 +1738,7 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -void xdebugc2outsincos(const ae_int_t m, const ae_int_t n, complex_2d_array &a); +void xdebugc2outsincos(const ae_int_t m, const ae_int_t n, complex_2d_array &a, const xparams _xparams = alglib::xdefault); /************************************************************************* @@ -1137,7 +1750,8 @@ -- ALGLIB -- Copyright 11.10.2013 by Bochkanov Sergey *************************************************************************/ -double xdebugmaskedbiasedproductsum(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const real_2d_array &b, const boolean_2d_array &c); +double xdebugmaskedbiasedproductsum(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const real_2d_array &b, const boolean_2d_array &c, const xparams _xparams = alglib::xdefault); +#endif } ///////////////////////////////////////////////////////////////////////// @@ -1147,34 +1761,7 @@ ///////////////////////////////////////////////////////////////////////// namespace alglib_impl { -void hqrndrandomize(hqrndstate* state, ae_state *_state); -void hqrndseed(ae_int_t s1, - ae_int_t s2, - hqrndstate* state, - ae_state *_state); -double hqrnduniformr(hqrndstate* state, ae_state *_state); -ae_int_t hqrnduniformi(hqrndstate* state, ae_int_t n, 
ae_state *_state); -double hqrndnormal(hqrndstate* state, ae_state *_state); -void hqrndunit2(hqrndstate* state, double* x, double* y, ae_state *_state); -void hqrndnormal2(hqrndstate* state, - double* x1, - double* x2, - ae_state *_state); -double hqrndexponential(hqrndstate* state, - double lambdav, - ae_state *_state); -double hqrnddiscrete(hqrndstate* state, - /* Real */ ae_vector* x, - ae_int_t n, - ae_state *_state); -double hqrndcontinuous(hqrndstate* state, - /* Real */ ae_vector* x, - ae_int_t n, - ae_state *_state); -void _hqrndstate_init(void* _p, ae_state *_state); -void _hqrndstate_init_copy(void* _dst, void* _src, ae_state *_state); -void _hqrndstate_clear(void* _p); -void _hqrndstate_destroy(void* _p); +#if defined(AE_COMPILE_NEARESTNEIGHBOR) || !defined(AE_PARTIAL_BUILD) void kdtreebuild(/* Real */ ae_matrix* xy, ae_int_t n, ae_int_t nx, @@ -1190,22 +1777,64 @@ ae_int_t normtype, kdtree* kdt, ae_state *_state); +void kdtreecreaterequestbuffer(kdtree* kdt, + kdtreerequestbuffer* buf, + ae_state *_state); ae_int_t kdtreequeryknn(kdtree* kdt, /* Real */ ae_vector* x, ae_int_t k, ae_bool selfmatch, ae_state *_state); +ae_int_t kdtreetsqueryknn(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* x, + ae_int_t k, + ae_bool selfmatch, + ae_state *_state); ae_int_t kdtreequeryrnn(kdtree* kdt, /* Real */ ae_vector* x, double r, ae_bool selfmatch, ae_state *_state); +ae_int_t kdtreequeryrnnu(kdtree* kdt, + /* Real */ ae_vector* x, + double r, + ae_bool selfmatch, + ae_state *_state); +ae_int_t kdtreetsqueryrnn(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* x, + double r, + ae_bool selfmatch, + ae_state *_state); +ae_int_t kdtreetsqueryrnnu(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* x, + double r, + ae_bool selfmatch, + ae_state *_state); ae_int_t kdtreequeryaknn(kdtree* kdt, /* Real */ ae_vector* x, ae_int_t k, ae_bool selfmatch, double eps, ae_state *_state); +ae_int_t kdtreetsqueryaknn(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* x, + ae_int_t k, + ae_bool selfmatch, + double eps, + ae_state *_state); +ae_int_t kdtreequerybox(kdtree* kdt, + /* Real */ ae_vector* boxmin, + /* Real */ ae_vector* boxmax, + ae_state *_state); +ae_int_t kdtreetsquerybox(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* boxmin, + /* Real */ ae_vector* boxmax, + ae_state *_state); void kdtreequeryresultsx(kdtree* kdt, /* Real */ ae_matrix* x, ae_state *_state); @@ -1218,6 +1847,22 @@ void kdtreequeryresultsdistances(kdtree* kdt, /* Real */ ae_vector* r, ae_state *_state); +void kdtreetsqueryresultsx(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_matrix* x, + ae_state *_state); +void kdtreetsqueryresultsxy(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_matrix* xy, + ae_state *_state); +void kdtreetsqueryresultstags(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Integer */ ae_vector* tags, + ae_state *_state); +void kdtreetsqueryresultsdistances(kdtree* kdt, + kdtreerequestbuffer* buf, + /* Real */ ae_vector* r, + ae_state *_state); void kdtreequeryresultsxi(kdtree* kdt, /* Real */ ae_matrix* x, ae_state *_state); @@ -1230,13 +1875,69 @@ void kdtreequeryresultsdistancesi(kdtree* kdt, /* Real */ ae_vector* r, ae_state *_state); +void kdtreeexplorebox(kdtree* kdt, + /* Real */ ae_vector* boxmin, + /* Real */ ae_vector* boxmax, + ae_state *_state); +void kdtreeexplorenodetype(kdtree* kdt, + ae_int_t node, + ae_int_t* nodetype, + ae_state *_state); +void kdtreeexploreleaf(kdtree* kdt, + ae_int_t node, + /* 
Real */ ae_matrix* xy, + ae_int_t* k, + ae_state *_state); +void kdtreeexploresplit(kdtree* kdt, + ae_int_t node, + ae_int_t* d, + double* s, + ae_int_t* nodele, + ae_int_t* nodege, + ae_state *_state); void kdtreealloc(ae_serializer* s, kdtree* tree, ae_state *_state); void kdtreeserialize(ae_serializer* s, kdtree* tree, ae_state *_state); void kdtreeunserialize(ae_serializer* s, kdtree* tree, ae_state *_state); -void _kdtree_init(void* _p, ae_state *_state); -void _kdtree_init_copy(void* _dst, void* _src, ae_state *_state); +void _kdtreerequestbuffer_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _kdtreerequestbuffer_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); +void _kdtreerequestbuffer_clear(void* _p); +void _kdtreerequestbuffer_destroy(void* _p); +void _kdtree_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _kdtree_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _kdtree_clear(void* _p); void _kdtree_destroy(void* _p); +#endif +#if defined(AE_COMPILE_HQRND) || !defined(AE_PARTIAL_BUILD) +void hqrndrandomize(hqrndstate* state, ae_state *_state); +void hqrndseed(ae_int_t s1, + ae_int_t s2, + hqrndstate* state, + ae_state *_state); +double hqrnduniformr(hqrndstate* state, ae_state *_state); +ae_int_t hqrnduniformi(hqrndstate* state, ae_int_t n, ae_state *_state); +double hqrndnormal(hqrndstate* state, ae_state *_state); +void hqrndunit2(hqrndstate* state, double* x, double* y, ae_state *_state); +void hqrndnormal2(hqrndstate* state, + double* x1, + double* x2, + ae_state *_state); +double hqrndexponential(hqrndstate* state, + double lambdav, + ae_state *_state); +double hqrnddiscrete(hqrndstate* state, + /* Real */ ae_vector* x, + ae_int_t n, + ae_state *_state); +double hqrndcontinuous(hqrndstate* state, + /* Real */ ae_vector* x, + ae_int_t n, + ae_state *_state); +void _hqrndstate_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _hqrndstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); +void _hqrndstate_clear(void* _p); +void _hqrndstate_destroy(void* _p); +#endif +#if defined(AE_COMPILE_XDEBUG) || !defined(AE_PARTIAL_BUILD) void xdebuginitrecord1(xdebugrecord1* rec1, ae_state *_state); ae_int_t xdebugb1count(/* Boolean */ ae_vector* a, ae_state *_state); void xdebugb1not(/* Boolean */ ae_vector* a, ae_state *_state); @@ -1296,10 +1997,11 @@ /* Real */ ae_matrix* b, /* Boolean */ ae_matrix* c, ae_state *_state); -void _xdebugrecord1_init(void* _p, ae_state *_state); -void _xdebugrecord1_init_copy(void* _dst, void* _src, ae_state *_state); +void _xdebugrecord1_init(void* _p, ae_state *_state, ae_bool make_automatic); +void _xdebugrecord1_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic); void _xdebugrecord1_clear(void* _p); void _xdebugrecord1_destroy(void* _p); +#endif } #endif diff -Nru alglib-3.10.0/src/ap.cpp alglib-3.16.0/src/ap.cpp --- alglib-3.10.0/src/ap.cpp 2015-08-19 12:24:23.000000000 +0000 +++ alglib-3.16.0/src/ap.cpp 2019-12-19 10:28:27.000000000 +0000 @@ -1,5 +1,5 @@ /************************************************************************* -ALGLIB 3.10.0 (source code generated 2015-08-19) +ALGLIB 3.16.0 (source code generated 2019-12-19) Copyright (c) Sergey Bochkanov (ALGLIB project). 
>>> SOURCE LICENSE >>> @@ -17,11 +17,24 @@ http://www.fsf.org/licensing/licenses >>> END OF LICENSE >>> *************************************************************************/ +#ifdef _MSC_VER +#define _CRT_SECURE_NO_WARNINGS +#endif + +// +// if AE_OS==AE_LINUX (will be redefined to AE_POSIX in ap.h), +// set _GNU_SOURCE flag BEFORE any #includes to get affinity +// management functions +// +#if (AE_OS==AE_LINUX) && !defined(_GNU_SOURCE) +#define _GNU_SOURCE +#endif + #include "stdafx.h" #include "ap.h" #include #include -using namespace std; +#include #if defined(AE_CPU) #if (AE_CPU==AE_INTEL) @@ -34,9 +47,10 @@ #endif // disable some irrelevant warnings -#if (AE_COMPILER==AE_MSVC) +#if (AE_COMPILER==AE_MSVC) && !defined(AE_ALL_WARNINGS) #pragma warning(disable:4100) #pragma warning(disable:4127) +#pragma warning(disable:4611) #pragma warning(disable:4702) #pragma warning(disable:4996) #endif @@ -95,14 +109,39 @@ #define AE_SM_DEFAULT 0 #define AE_SM_ALLOC 1 #define AE_SM_READY2S 2 -#define AE_SM_TO_STRING 10 -#define AE_SM_FROM_STRING 20 +#define AE_SM_TO_STRING 10 #define AE_SM_TO_CPPSTRING 11 +#define AE_SM_TO_STREAM 12 +#define AE_SM_FROM_STRING 20 +#define AE_SM_FROM_STREAM 22 #define AE_LOCK_CYCLES 512 #define AE_LOCK_TESTS_BEFORE_YIELD 16 #define AE_CRITICAL_ASSERT(x) if( !(x) ) abort() +/* IDs for set_dbg_value */ +#define _ALGLIB_USE_ALLOC_COUNTER 0 +#define _ALGLIB_USE_DBG_COUNTERS 1 +#define _ALGLIB_USE_VENDOR_KERNELS 100 +#define _ALGLIB_VENDOR_MEMSTAT 101 + +#define _ALGLIB_DEBUG_WORKSTEALING 200 +#define _ALGLIB_WSDBG_NCORES 201 +#define _ALGLIB_WSDBG_PUSHROOT_OK 202 +#define _ALGLIB_WSDBG_PUSHROOT_FAILED 203 + +#define _ALGLIB_SET_GLOBAL_THREADING 1001 +#define _ALGLIB_SET_NWORKERS 1002 + +/* IDs for get_dbg_value */ +#define _ALGLIB_GET_ALLOC_COUNTER 0 +#define _ALGLIB_GET_CUMULATIVE_ALLOC_SIZE 1 +#define _ALGLIB_GET_CUMULATIVE_ALLOC_COUNT 2 + +#define _ALGLIB_GET_CORES_COUNT 1000 +#define _ALGLIB_GET_GLOBAL_THREADING 1001 +#define _ALGLIB_GET_NWORKERS 1002 + /************************************************************************* Lock. @@ -121,11 +160,64 @@ } _lock; + + +/* + * Error tracking facilities; this fields are modified every time ae_set_error_flag() + * is called with non-zero cond. Thread unsafe access, but it does not matter actually. + */ +static const char * sef_file = ""; +static int sef_line = 0; +static const char * sef_xdesc = ""; + +/* + * Global flags, split into several char-sized variables in order + * to avoid problem with non-atomic reads/writes (single-byte ops + * are atomic on all modern architectures); + * + * Following variables are included: + * * threading-related settings + */ +unsigned char _alglib_global_threading_flags = _ALGLIB_FLG_THREADING_SERIAL>>_ALGLIB_FLG_THREADING_SHIFT; + +/* + * DESCRIPTION: recommended number of active workers: + * * positive value >=1 is used to specify exact number of active workers + * * 0 means that ALL available cores are used + * * negative value means that all cores EXCEPT for cores_to_use will be used + * (say, -1 means that all cores except for one will be used). At least one + * core will be used in this case, even if you assign -9999999 to this field. + * + * Default value = 0 (fully parallel execution) when AE_NWORKERS is not defined + * = 0 for manually defined number of cores (AE_NWORKERS is defined) + * PROTECTION: not needed; runtime modification is possible, but we do not need exact + * synchronization. 
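 * EXAMPLE:     (illustration) with 8 cores available: cores_to_use= 0 -> 8 workers,
 *              cores_to_use= 3 -> 3 workers, cores_to_use=-1 -> 7 workers,
 *              cores_to_use=-9999999 -> 1 worker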
+ */ +#if defined(AE_NWORKERS) && (AE_NWORKERS<=0) +#error AE_NWORKERS must be positive number or not defined at all. +#endif +#if defined(AE_NWORKERS) +ae_int_t _alglib_cores_to_use = 0; +#else +ae_int_t _alglib_cores_to_use = 0; +#endif + /* - * alloc counter + * Debug counters */ -ae_int64_t _alloc_counter = 0; +ae_int_t _alloc_counter = 0; +ae_int_t _alloc_counter_total = 0; ae_bool _use_alloc_counter = ae_false; + +ae_int_t _dbg_alloc_total = 0; +ae_bool _use_dbg_counters = ae_false; + +ae_bool _use_vendor_kernels = ae_true; + +ae_bool debug_workstealing = ae_false; /* debug workstealing environment? False by default */ +ae_int_t dbgws_pushroot_ok = 0; +ae_int_t dbgws_pushroot_failed = 0; + #ifdef AE_SMP_DEBUGCOUNTERS __declspec(align(AE_LOCK_ALIGNMENT)) volatile ae_int64_t _ae_dbg_lock_acquisitions = 0; __declspec(align(AE_LOCK_ALIGNMENT)) volatile ae_int64_t _ae_dbg_lock_spinwaits = 0; @@ -133,6 +225,46 @@ #endif /* + * Allocation debugging + */ +ae_bool _force_malloc_failure = ae_false; +ae_int_t _malloc_failure_after = 0; + + +/* + * Trace-related declarations: + * alglib_trace_type - trace output type + * alglib_trace_file - file descriptor (to be used by ALGLIB code which + * sends messages to trace log + * alglib_fclose_trace - whether we have to call fclose() when disabling or + * changing trace output + * alglib_trace_tags - string buffer used to store tags + two additional + * characters (leading and trailing commas) + null + * terminator + */ +#define ALGLIB_TRACE_NONE 0 +#define ALGLIB_TRACE_FILE 1 +#define ALGLIB_TRACE_TAGS_LEN 2048 +#define ALGLIB_TRACE_BUFFER_LEN (ALGLIB_TRACE_TAGS_LEN+2+1) +static ae_int_t alglib_trace_type = ALGLIB_TRACE_NONE; +FILE *alglib_trace_file = NULL; +static ae_bool alglib_fclose_trace = ae_false; +static char alglib_trace_tags[ALGLIB_TRACE_BUFFER_LEN]; + +/* + * Fields for memory allocation over static array + */ +#if AE_MALLOC==AE_BASIC_STATIC_MALLOC +#if AE_THREADING!=AE_SERIAL_UNSAFE +#error Basis static malloc is thread-unsafe; define AE_THREADING=AE_SERIAL_UNSAFE to prove that you know it +#endif +static ae_int_t sm_page_size = 0; +static ae_int_t sm_page_cnt = 0; +static ae_int_t *sm_page_tbl = NULL; +static unsigned char *sm_mem = NULL; +#endif + +/* * These declarations are used to ensure that * sizeof(ae_bool)=1, sizeof(ae_int32_t)==4, sizeof(ae_int64_t)==8, sizeof(ae_int_t)==sizeof(void*). * they will lead to syntax error otherwise (array size will be negative). @@ -140,10 +272,11 @@ * you can remove them, if you want - they are not used anywhere. 
* */ -static char _ae_bool_must_be_8_bits_wide[1-2*((int)(sizeof(ae_bool))-1)*((int)(sizeof(ae_bool))-1)]; -static char _ae_int32_t_must_be_32_bits_wide[1-2*((int)(sizeof(ae_int32_t))-4)*((int)(sizeof(ae_int32_t))-4)]; -static char _ae_int64_t_must_be_64_bits_wide[1-2*((int)(sizeof(ae_int64_t))-8)*((int)(sizeof(ae_int64_t))-8)]; -static char _ae_int_t_must_be_pointer_sized [1-2*((int)(sizeof(ae_int_t))-(int)sizeof(void*))*((int)(sizeof(ae_int_t))-(int)(sizeof(void*)))]; +static char _ae_bool_must_be_8_bits_wide [1-2*((int)(sizeof(ae_bool))-1)*((int)(sizeof(ae_bool))-1)]; +static char _ae_int32_t_must_be_32_bits_wide[1-2*((int)(sizeof(ae_int32_t))-4)*((int)(sizeof(ae_int32_t))-4)]; +static char _ae_int64_t_must_be_64_bits_wide[1-2*((int)(sizeof(ae_int64_t))-8)*((int)(sizeof(ae_int64_t))-8)]; +static char _ae_uint64_t_must_be_64_bits_wide[1-2*((int)(sizeof(ae_uint64_t))-8)*((int)(sizeof(ae_uint64_t))-8)]; +static char _ae_int_t_must_be_pointer_sized [1-2*((int)(sizeof(ae_int_t))-(int)sizeof(void*))*((int)(sizeof(ae_int_t))-(int)(sizeof(void*)))]; /* * This variable is used to prevent some tricky optimizations which may degrade multithreaded performance. @@ -152,6 +285,168 @@ */ static volatile ae_int_t ae_never_change_it = 1; +/************************************************************************* +This function should never be called. It is here to prevent spurious +compiler warnings about unused variables (in fact: used). +*************************************************************************/ +void ae_never_call_it() +{ + ae_touch_ptr((void*)_ae_bool_must_be_8_bits_wide); + ae_touch_ptr((void*)_ae_int32_t_must_be_32_bits_wide); + ae_touch_ptr((void*)_ae_int64_t_must_be_64_bits_wide); + ae_touch_ptr((void*)_ae_uint64_t_must_be_64_bits_wide); + ae_touch_ptr((void*)_ae_int_t_must_be_pointer_sized); +} + +void ae_set_dbg_flag(ae_int64_t flag_id, ae_int64_t flag_val) +{ + if( flag_id==_ALGLIB_USE_ALLOC_COUNTER ) + { + _use_alloc_counter = flag_val!=0; + return; + } + if( flag_id==_ALGLIB_USE_DBG_COUNTERS ) + { + _use_dbg_counters = flag_val!=0; + return; + } + if( flag_id==_ALGLIB_USE_VENDOR_KERNELS ) + { + _use_vendor_kernels = flag_val!=0; + return; + } + if( flag_id==_ALGLIB_DEBUG_WORKSTEALING ) + { + debug_workstealing = flag_val!=0; + return; + } + if( flag_id==_ALGLIB_SET_GLOBAL_THREADING ) + { + ae_set_global_threading((ae_uint64_t)flag_val); + return; + } + if( flag_id==_ALGLIB_SET_NWORKERS ) + { + _alglib_cores_to_use = (ae_int_t)flag_val; + return; + } +} + +ae_int64_t ae_get_dbg_value(ae_int64_t id) +{ + if( id==_ALGLIB_GET_ALLOC_COUNTER ) + return _alloc_counter; + if( id==_ALGLIB_GET_CUMULATIVE_ALLOC_SIZE ) + return _dbg_alloc_total; + if( id==_ALGLIB_GET_CUMULATIVE_ALLOC_COUNT ) + return _alloc_counter_total; + + if( id==_ALGLIB_VENDOR_MEMSTAT ) + { +#if defined(AE_MKL) + return ae_mkl_memstat(); +#else + return 0; +#endif + } + + /* workstealing counters */ + if( id==_ALGLIB_WSDBG_NCORES ) +#if defined(AE_SMP) + return ae_cores_count(); +#else + return 0; +#endif + if( id==_ALGLIB_WSDBG_PUSHROOT_OK ) + return dbgws_pushroot_ok; + if( id==_ALGLIB_WSDBG_PUSHROOT_FAILED ) + return dbgws_pushroot_failed; + + if( id==_ALGLIB_GET_CORES_COUNT ) +#if defined(AE_SMP) + return ae_cores_count(); +#else + return 0; +#endif + if( id==_ALGLIB_GET_GLOBAL_THREADING ) + return (ae_int64_t)ae_get_global_threading(); + if( id==_ALGLIB_GET_NWORKERS ) + return (ae_int64_t)_alglib_cores_to_use; + + /* unknown value */ + return 0; +} + 
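
/*************************************************************************
Illustrative sketch (not part of the upstream sources) of how the debug
get/set entry points above can be driven from code in this translation
unit; _ALGLIB_FLG_THREADING_PARALLEL is assumed to come from ap.h, as it
does elsewhere in this file:

    // route a threading request through the generic debug-flag interface
    ae_set_dbg_flag(_ALGLIB_SET_GLOBAL_THREADING, (ae_int64_t)_ALGLIB_FLG_THREADING_PARALLEL);

    // query current settings and capabilities
    ae_int64_t thr    = ae_get_dbg_value(_ALGLIB_GET_GLOBAL_THREADING);  // current threading flags
    ae_int64_t ncores = ae_get_dbg_value(_ALGLIB_GET_CORES_COUNT);       // 0 if SMP support is compiled out
*************************************************************************/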
+/************************************************************************ +This function sets default (global) threading model: +* serial execution +* multithreading, if cores_to_use allows it + +************************************************************************/ +void ae_set_global_threading(ae_uint64_t flg_value) +{ + flg_value = flg_value&_ALGLIB_FLG_THREADING_MASK; + AE_CRITICAL_ASSERT(flg_value==_ALGLIB_FLG_THREADING_SERIAL || flg_value==_ALGLIB_FLG_THREADING_PARALLEL); + _alglib_global_threading_flags = (unsigned char)(flg_value>>_ALGLIB_FLG_THREADING_SHIFT); +} + +/************************************************************************ +This function gets default (global) threading model: +* serial execution +* multithreading, if cores_to_use allows it + +************************************************************************/ +ae_uint64_t ae_get_global_threading() +{ + return ((ae_uint64_t)_alglib_global_threading_flags)<<_ALGLIB_FLG_THREADING_SHIFT; +} + +void ae_set_error_flag(ae_bool *p_flag, ae_bool cond, const char *filename, int lineno, const char *xdesc) +{ + if( cond ) + { + *p_flag = ae_true; + sef_file = filename; + sef_line = lineno; + sef_xdesc= xdesc; +#ifdef ALGLIB_ABORT_ON_ERROR_FLAG + printf("[ALGLIB] aborting on ae_set_error_flag(cond=true)\n"); + printf("[ALGLIB] %s:%d\n", filename, lineno); + printf("[ALGLIB] %s\n", xdesc); + fflush(stdout); + if( alglib_trace_file!=NULL ) fflush(alglib_trace_file); + abort(); +#endif + } +} + +/************************************************************************ +This function returns file name for the last call of ae_set_error_flag() +with non-zero cond parameter. +************************************************************************/ +const char * ae_get_last_error_file() +{ + return sef_file; +} + +/************************************************************************ +This function returns line number for the last call of ae_set_error_flag() +with non-zero cond parameter. +************************************************************************/ +int ae_get_last_error_line() +{ + return sef_line; +} + +/************************************************************************ +This function returns extra description for the last call of ae_set_error_flag() +with non-zero cond parameter. +************************************************************************/ +const char * ae_get_last_error_xdesc() +{ + return sef_xdesc; +} + ae_int_t ae_misalignment(const void *ptr, size_t alignment) { union _u @@ -171,15 +466,151 @@ return result; } +/************************************************************************ +This function maps nworkers number (which can be positive, zero or +negative with 0 meaning "all cores", -1 meaning "all cores -1" and so on) +to "effective", strictly positive workers count. + +This function is intended to be used by debugging/testing code which +tests different number of worker threads. It is NOT aligned in any way +with ALGLIB multithreading framework (i.e. it can return non-zero worker +count even for single-threaded GPLed ALGLIB). +************************************************************************/ +ae_int_t ae_get_effective_workers(ae_int_t nworkers) +{ + ae_int_t ncores; + + /* determine cores count */ +#if defined(AE_NWORKERS) + ncores = AE_NWORKERS; +#elif AE_OS==AE_WINDOWS + SYSTEM_INFO sysInfo; + GetSystemInfo(&sysInfo); + ncores = (ae_int_t)(sysInfo.dwNumberOfProcessors); +#elif AE_OS==AE_POSIX + { + long r = sysconf(_SC_NPROCESSORS_ONLN); + ncores = r<=0 ? 
1 : r; + } +#else + ncores = 1; +#endif + AE_CRITICAL_ASSERT(ncores>=1); + + /* map nworkers to its effective value */ + if( nworkers>=1 ) + return nworkers>ncores ? ncores : nworkers; + return ncores+nworkers>=1 ? ncores+nworkers : 1; +} + +/************************************************************************* +This function belongs to the family of "optional atomics", i.e. atomic +functions which either perform atomic changes - or do nothing at all, if +current compiler settings do not allow us to generate atomic code. + +All "optional atomics" are synchronized, i.e. either all of them work - or +no one of the works. + +This particular function performs atomic addition on pointer-sized value, +which must be pointer-size aligned. + +NOTE: this function is not intended to be extremely high performance one, + so use it only when necessary. +*************************************************************************/ +void ae_optional_atomic_add_i(ae_int_t *p, ae_int_t v) +{ + AE_CRITICAL_ASSERT(ae_misalignment(p,sizeof(void*))==0); +#if AE_OS==AE_WINDOWS + for(;;) + { + /* perform conversion between ae_int_t* and void** + without compiler warnings about indirection levels */ + union _u + { + PVOID volatile * volatile ptr; + volatile ae_int_t * volatile iptr; + } u; + u.iptr = p; + + /* atomic read for initial value */ + PVOID v0 = InterlockedCompareExchangePointer(u.ptr, NULL, NULL); + + /* increment cached value and store */ + if( InterlockedCompareExchangePointer(u.ptr, (PVOID)(((char*)v0)+v), v0)==v0 ) + break; + } +#elif (AE_COMPILER==AE_GNUC) && (AE_CPU==AE_INTEL) && (__GNUC__*100+__GNUC__>=470) + __atomic_add_fetch(p, v, __ATOMIC_RELAXED); +#else +#endif +} + +/************************************************************************* +This function belongs to the family of "optional atomics", i.e. atomic +functions which either perform atomic changes - or do nothing at all, if +current compiler settings do not allow us to generate atomic code. + +All "optional atomics" are synchronized, i.e. either all of them work - or +no one of the works. + +This particular function performs atomic subtraction on pointer-sized +value, which must be pointer-size aligned. + +NOTE: this function is not intended to be extremely high performance one, + so use it only when necessary. +*************************************************************************/ +void ae_optional_atomic_sub_i(ae_int_t *p, ae_int_t v) +{ + AE_CRITICAL_ASSERT(ae_misalignment(p,sizeof(void*))==0); +#if AE_OS==AE_WINDOWS + for(;;) + { + /* perform conversion between ae_int_t* and void** + without compiler warnings about indirection levels */ + union _u + { + PVOID volatile * volatile ptr; + volatile ae_int_t * volatile iptr; + } u; + u.iptr = p; + + /* atomic read for initial value, convert it to 1-byte pointer */ + PVOID v0 = InterlockedCompareExchangePointer(u.ptr, NULL, NULL); + + /* increment cached value and store */ + if( InterlockedCompareExchangePointer(u.ptr, (PVOID)(((char*)v0)-v), v0)==v0 ) + break; + } +#elif (AE_COMPILER==AE_GNUC) && (AE_CPU==AE_INTEL) && (__GNUC__*100+__GNUC__>=470) + __atomic_sub_fetch(p, v, __ATOMIC_RELAXED); +#else +#endif +} + + +/************************************************************************* +This function cleans up automatically managed memory before caller terminates +ALGLIB executing by ae_break() or by simply stopping calling callback. + +For state!=NULL it calls thread_exception_handler() and the ae_state_clear(). +For state==NULL it does nothing. 
+*************************************************************************/ +void ae_clean_up_before_breaking(ae_state *state) +{ + if( state!=NULL ) + { + if( state->thread_exception_handler!=NULL ) + state->thread_exception_handler(state); + ae_state_clear(state); + } +} + /************************************************************************* This function abnormally aborts program, using one of several ways: -* for AE_USE_CPP_ERROR_HANDLING being NOT defined: - * for state!=NULL and state->break_jump being initialized with call to - ae_state_set_break_jump() - it performs longjmp() to return site. - * otherwise, abort() is called -* for AE_USE_CPP_ERROR_HANDLING being DEFINED - an instance of ae_error_type() - class is throw'ed. +* for state!=NULL and state->break_jump being initialized with call to + ae_state_set_break_jump() - it performs longjmp() to return site. +* otherwise, abort() is called In all cases, for state!=NULL function sets state->last_error and state->error_msg fields. It also clears state with ae_state_clear(). @@ -189,12 +620,11 @@ *************************************************************************/ void ae_break(ae_state *state, ae_error_type error_type, const char *msg) { -#ifndef AE_USE_CPP_ERROR_HANDLING if( state!=NULL ) { - if( state->thread_exception_handler!=NULL ) - state->thread_exception_handler(state); - ae_state_clear(state); + if( alglib_trace_type!=ALGLIB_TRACE_NONE ) + ae_trace("---!!! CRITICAL ERROR !!!--- exception with message '%s' was generated\n", msg!=NULL ? msg : ""); + ae_clean_up_before_breaking(state); state->last_error = error_type; state->error_msg = msg; if( state->break_jump!=NULL ) @@ -204,26 +634,173 @@ } else abort(); -#else - if( state!=NULL ) +} + +#if AE_MALLOC==AE_BASIC_STATIC_MALLOC +void set_memory_pool(void *ptr, size_t size) +{ + /* + * Integrity checks + */ + AE_CRITICAL_ASSERT(sm_page_size==0); + AE_CRITICAL_ASSERT(sm_page_cnt==0); + AE_CRITICAL_ASSERT(sm_page_tbl==NULL); + AE_CRITICAL_ASSERT(sm_mem==NULL); + AE_CRITICAL_ASSERT(size>0); + + /* + * Align pointer + */ + size -= ae_misalignment(ptr, sizeof(ae_int_t)); + ptr = ae_align(ptr, sizeof(ae_int_t)); + + /* + * Calculate page size and page count, prepare pointers to page table and memory + */ + sm_page_size = 256; + AE_CRITICAL_ASSERT(size>=(sm_page_size+sizeof(ae_int_t))+sm_page_size); /* we expect to have memory for at least one page + table entry + alignment */ + sm_page_cnt = (size-sm_page_size)/(sm_page_size+sizeof(ae_int_t)); + AE_CRITICAL_ASSERT(sm_page_cnt>0); + sm_page_tbl = (ae_int_t*)ptr; + sm_mem = (unsigned char*)ae_align(sm_page_tbl+sm_page_cnt, sm_page_size); + + /* + * Mark all pages as free + */ + memset(sm_page_tbl, 0, sm_page_cnt*sizeof(ae_int_t)); +} + +void* ae_static_malloc(size_t size, size_t alignment) +{ + int rq_pages, i, j, cur_len; + + AE_CRITICAL_ASSERT(size>=0); + AE_CRITICAL_ASSERT(sm_page_size>0); + AE_CRITICAL_ASSERT(sm_page_cnt>0); + AE_CRITICAL_ASSERT(sm_page_tbl!=NULL); + AE_CRITICAL_ASSERT(sm_mem!=NULL); + + if( size==0 ) + return NULL; + if( _force_malloc_failure ) + return NULL; + + /* check that page alignment and requested alignment match each other */ + AE_CRITICAL_ASSERT(alignment<=sm_page_size); + AE_CRITICAL_ASSERT((sm_page_size%alignment)==0); + + /* search long enough sequence of pages */ + rq_pages = size/sm_page_size; + if( size%sm_page_size ) + rq_pages++; + cur_len = 0; + for(i=0; i0); + cur_len=0; + i += sm_page_tbl[i]; + continue; + } + + /* found it? 
*/ + if( cur_len>=rq_pages ) + { + /* update counters (if flag is set) */ + if( _use_alloc_counter ) + { + ae_optional_atomic_add_i(&_alloc_counter, 1); + ae_optional_atomic_add_i(&_alloc_counter_total, 1); + } + if( _use_dbg_counters ) + ae_optional_atomic_add_i(&_dbg_alloc_total, size); + + /* mark pages and return */ + for(j=0; j=0); + AE_CRITICAL_ASSERT((page_idx%sm_page_size)==0); + page_idx = page_idx/sm_page_size; + AE_CRITICAL_ASSERT(page_idx=1); + for(i=0; i0); + AE_CRITICAL_ASSERT(sm_page_cnt>0); + AE_CRITICAL_ASSERT(sm_page_tbl!=NULL); + AE_CRITICAL_ASSERT(sm_mem!=NULL); + + /* scan page table */ + *bytes_used = 0; + *bytes_free = 0; + for(i=0; ithread_exception_handler!=NULL ) - state->thread_exception_handler(state); - ae_state_clear(state); - state->last_error = error_type; - state->error_msg = msg; + if( sm_page_tbl[i]==0 ) + { + (*bytes_free)++; + i++; + } + else + { + AE_CRITICAL_ASSERT(sm_page_tbl[i]>0); + *bytes_used += sm_page_tbl[i]; + i += sm_page_tbl[i]; + } } - throw error_type; -#endif + *bytes_used *= sm_page_size; + *bytes_free *= sm_page_size; } +#endif void* aligned_malloc(size_t size, size_t alignment) { +#if AE_MALLOC==AE_BASIC_STATIC_MALLOC + return ae_static_malloc(size, alignment); +#else + char *result = NULL; + if( size==0 ) return NULL; + if( _force_malloc_failure ) + return NULL; + if( _malloc_failure_after>0 && _alloc_counter_total>=_malloc_failure_after ) + return NULL; + + /* allocate */ if( alignment<=1 ) { - /* no alignment, just call malloc */ + /* no alignment, just call alloc */ void *block; void **p; ; block = malloc(sizeof(void*)+size); @@ -231,20 +808,12 @@ return NULL; p = (void**)block; *p = block; - if( _use_alloc_counter ) - { -#if AE_OS==AE_WINDOWS - InterlockedIncrement((LONG volatile *)&_alloc_counter); -#else -#endif - } - return (void*)((char*)block+sizeof(void*)); + result = (char*)((char*)block+sizeof(void*)); } else { /* align */ void *block; - char *result; block = malloc(alignment-1+sizeof(void*)+size); if( block==NULL ) return NULL; @@ -253,35 +822,59 @@ result += alignment - (result-(char*)0)%alignment;*/ result = (char*)ae_align(result, alignment); *((void**)(result-sizeof(void*))) = block; - if( _use_alloc_counter ) - { -#if AE_OS==AE_WINDOWS - InterlockedIncrement((LONG volatile *)&_alloc_counter); + } + + /* update counters (if flag is set) */ + if( _use_alloc_counter ) + { + ae_optional_atomic_add_i(&_alloc_counter, 1); + ae_optional_atomic_add_i(&_alloc_counter_total, 1); + } + if( _use_dbg_counters ) + ae_optional_atomic_add_i(&_dbg_alloc_total, (ae_int64_t)size); + + /* return */ + return (void*)result; +#endif +} + +void* aligned_extract_ptr(void *block) +{ +#if AE_MALLOC==AE_BASIC_STATIC_MALLOC + return NULL; #else + if( block==NULL ) + return NULL; + return *((void**)((char*)block-sizeof(void*))); #endif - } - return result; - } } void aligned_free(void *block) { +#if AE_MALLOC==AE_BASIC_STATIC_MALLOC + ae_static_free(block); +#else void *p; if( block==NULL ) return; - p = *((void**)((char*)block-sizeof(void*))); + p = aligned_extract_ptr(block); free(p); if( _use_alloc_counter ) - { -#if AE_OS==AE_WINDOWS - InterlockedDecrement((LONG volatile *)&_alloc_counter); -#else + ae_optional_atomic_sub_i(&_alloc_counter, 1); #endif - } +} + +void* eternal_malloc(size_t size) +{ + if( size==0 ) + return NULL; + if( _force_malloc_failure ) + return NULL; + return malloc(size); } /************************************************************************ -Malloc's memory with automatic alignment. 
+Allocate memory with automatic alignment. Returns NULL when zero size is specified. @@ -348,6 +941,51 @@ } } +/************************************************************************ +Checks that n bytes pointed by ptr are zero. + +This function is used in the constructors to check that instance fields +on entry are correctly initialized by zeros. +************************************************************************/ +ae_bool ae_check_zeros(const void *ptr, ae_int_t n) +{ + ae_int_t nu, nr, i; + unsigned long long c = 0x0; + + /* + * determine leading and trailing lengths + */ + nu = n/sizeof(unsigned long long); + nr = n%sizeof(unsigned long long); + + /* + * handle leading nu long long elements + */ + if( nu>0 ) + { + const unsigned long long *p_ull; + p_ull = (const unsigned long long *)ptr; + for(i=0; i0 ) + { + const unsigned char *p_uc; + p_uc = ((const unsigned char *)ptr)+nu*sizeof(unsigned long long); + for(i=0; iflags = 0x0; /* * p_next points to itself because: @@ -387,9 +1030,7 @@ state->last_block.deallocator = NULL; state->last_block.ptr = DYN_BOTTOM; state->p_top_block = &(state->last_block); -#ifndef AE_USE_CPP_ERROR_HANDLING state->break_jump = NULL; -#endif state->error_msg = ""; /* @@ -443,7 +1084,6 @@ } -#ifndef AE_USE_CPP_ERROR_HANDLING /************************************************************************ This function sets jump buffer for error handling. @@ -453,7 +1093,17 @@ { state->break_jump = buf; } -#endif + + +/************************************************************************ +This function sets flags member of the ae_state structure + +buf may be NULL. +************************************************************************/ +void ae_state_set_flags(ae_state *state, ae_uint64_t flags) +{ + state->flags = flags; +} /************************************************************************ @@ -496,6 +1146,8 @@ block block state ALGLIB environment state +This function does NOT generate exceptions. + NOTES: * never call it for special blocks which marks frame boundaries! ************************************************************************/ @@ -507,46 +1159,51 @@ /************************************************************************ -This function malloc's dynamic block: +This function initializes dynamic block: -block destination block, assumed to be uninitialized -size size (in bytes) -state ALGLIB environment state. May be NULL. +block destination block, MUST be zero-filled on entry +size size (in bytes), >=0. +state ALGLIB environment state, non-NULL make_automatic if true, vector is added to the dynamic block list -block is assumed to be uninitialized, its fields are ignored. +block is assumed to be uninitialized, its fields are ignored. You may +call this function with zero size in order to register block in the +dynamic list. -Error handling: -* if state is NULL, returns ae_false on allocation error -* if state is not NULL, calls ae_break() on allocation error -* returns ae_true on success +Error handling: calls ae_break() on allocation error. Block is left in +valid state (empty, but valid). NOTES: -* never call it for blocks which are already in the list +* never call it for blocks which are already in the list; use ae_db_realloc + for already allocated blocks. 
+ +NOTE: no memory allocation is performed for initialization with size=0 ************************************************************************/ -ae_bool ae_db_malloc(ae_dyn_block *block, ae_int_t size, ae_state *state, ae_bool make_automatic) +void ae_db_init(ae_dyn_block *block, ae_int_t size, ae_state *state, ae_bool make_automatic) { - /* ensure that size is >=0 - two ways to exit: 1) through ae_assert, if we have non-NULL state, 2) by returning ae_false */ - if( state!=NULL ) - ae_assert(size>=0, "ae_db_malloc(): negative size", state); - if( size<0 ) - return ae_false; + AE_CRITICAL_ASSERT(state!=NULL); + AE_CRITICAL_ASSERT(ae_check_zeros(block,sizeof(*block))); - /* allocation */ - block->ptr = ae_malloc((size_t)size, state); - if( block->ptr==NULL && size!=0 ) - { - /* for state!=NULL exception is thrown from ae_malloc(), so - we have to handle only situation when state is NULL */ - return ae_false; - } - if( make_automatic && state!=NULL ) + /* + * NOTE: these strange dances around block->ptr are necessary + * in order to correctly handle possible exceptions during + * memory allocation. + */ + ae_assert(size>=0, "ae_db_init(): negative size", state); + block->ptr = NULL; + block->valgrind_hint = NULL; + ae_touch_ptr(block->ptr); + ae_touch_ptr(block->valgrind_hint); + if( make_automatic ) ae_db_attach(block, state); else block->p_next = NULL; + if( size!=0 ) + { + block->ptr = ae_malloc((size_t)size, state); + block->valgrind_hint = aligned_extract_ptr(block->ptr); + } block->deallocator = ae_free; - return ae_true; } @@ -563,35 +1220,31 @@ * deletes old contents * preserves automatic state -Error handling: -* if state is NULL, returns ae_false on allocation error -* if state is not NULL, calls ae_break() on allocation error -* returns ae_true on success +Error handling: calls ae_break() on allocation error. Block is left in +valid state - empty, but valid. NOTES: * never call it for special blocks which mark frame boundaries! ************************************************************************/ -ae_bool ae_db_realloc(ae_dyn_block *block, ae_int_t size, ae_state *state) +void ae_db_realloc(ae_dyn_block *block, ae_int_t size, ae_state *state) { - /* ensure that size is >=0 - two ways to exit: 1) through ae_assert, if we have non-NULL state, 2) by returning ae_false */ - if( state!=NULL ) - ae_assert(size>=0, "ae_db_realloc(): negative size", state); - if( size<0 ) - return ae_false; + AE_CRITICAL_ASSERT(state!=NULL); - /* realloc */ + /* + * NOTE: these strange dances around block->ptr are necessary + * in order to correctly handle possible exceptions during + * memory allocation. 
+ */ + ae_assert(size>=0, "ae_db_realloc(): negative size", state); if( block->ptr!=NULL ) - ((ae_deallocator)block->deallocator)(block->ptr); - block->ptr = ae_malloc((size_t)size, state); - if( block->ptr==NULL && size!=0 ) { - /* for state!=NULL exception is thrown from ae_malloc(), so - we have to handle only situation when state is NULL */ - return ae_false; + ((ae_deallocator)block->deallocator)(block->ptr); + block->ptr = NULL; + block->valgrind_hint = NULL; } + block->ptr = ae_malloc((size_t)size, state); + block->valgrind_hint = aligned_extract_ptr(block->ptr); block->deallocator = ae_free; - return ae_true; } @@ -610,6 +1263,7 @@ if( block->ptr!=NULL ) ((ae_deallocator)block->deallocator)(block->ptr); block->ptr = NULL; + block->valgrind_hint = NULL; block->deallocator = ae_free; } @@ -625,11 +1279,18 @@ { void (*deallocator)(void*) = NULL; void * volatile ptr; + void * valgrind_hint; + ptr = block1->ptr; + valgrind_hint = block1->valgrind_hint; deallocator = block1->deallocator; + block1->ptr = block2->ptr; + block1->valgrind_hint = block2->valgrind_hint; block1->deallocator = block2->deallocator; + block2->ptr = ptr; + block2->valgrind_hint = valgrind_hint; block2->deallocator = deallocator; } @@ -637,40 +1298,37 @@ This function creates ae_vector. Vector size may be zero. Vector contents is uninitialized. -dst destination vector, assumed to be uninitialized, its - fields are ignored. +dst destination vector, MUST be zero-filled (we check it + and call abort() if *dst is non-zero; the rationale is + that we can not correctly handle errors in constructors + without zero-filling). size vector size, may be zero datatype guess what... -state this parameter can be: - * pointer to current instance of ae_state, if you want - to automatically destroy this object after leaving - current frame - * NULL, if you do NOT want this vector to be - automatically managed (say, if it is field of some - object) +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, vector will be registered in the current frame + of the state structure; -Error handling: -* on failure (size<0 or unable to allocate memory) - calls ae_break() with - NULL state pointer. Usually it results in abort() call. 
- -dst is +NOTE: no memory allocation is performed for initialization with size=0 *************************************************************************/ -void ae_vector_init(ae_vector *dst, ae_int_t size, ae_datatype datatype, ae_state *state) +void ae_vector_init(ae_vector *dst, ae_int_t size, ae_datatype datatype, ae_state *state, ae_bool make_automatic) { - /* ensure that size is >=0 - two ways to exit: 1) through ae_assert, if we have non-NULL state, 2) by returning ae_false */ - ae_assert( - size>=0, - "ae_vector_init(): negative size", - NULL); - + /* + * Integrity checks + */ + AE_CRITICAL_ASSERT(state!=NULL); + AE_CRITICAL_ASSERT(ae_check_zeros(dst,sizeof(*dst))); + ae_assert(size>=0, "ae_vector_init(): negative size", state); + + /* prepare for possible errors during allocation */ + dst->cnt = 0; + dst->ptr.p_ptr = NULL; + /* init */ + ae_db_init(&dst->data, size*ae_sizeof(datatype), state, make_automatic); dst->cnt = size; dst->datatype = datatype; - ae_assert( - ae_db_malloc(&dst->data, size*ae_sizeof(datatype), state, state!=NULL), /* TODO: change ae_db_malloc() */ - "ae_vector_init(): failed to allocate memory", - NULL); dst->ptr.p_ptr = dst->data.ptr; dst->is_attached = ae_false; } @@ -680,27 +1338,26 @@ This function creates copy of ae_vector. New copy of the data is created, which is managed and owned by newly initialized vector. -dst destination vector +dst destination vector, MUST be zero-filled (we check it + and call abort() if *dst is non-zero; the rationale is + that we can not correctly handle errors in constructors + without zero-filling). src well, it is source -state this parameter can be: - * pointer to current instance of ae_state, if you want - to automatically destroy this object after leaving - current frame - * NULL, if you do NOT want this vector to be - automatically managed (say, if it is field of some - object) - -Error handling: -* on failure calls ae_break() with NULL state pointer. Usually it results - in abort() call. +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, vector will be registered in the current frame + of the state structure; dst is assumed to be uninitialized, its fields are ignored. ************************************************************************/ -void ae_vector_init_copy(ae_vector *dst, ae_vector *src, ae_state *state) +void ae_vector_init_copy(ae_vector *dst, ae_vector *src, ae_state *state, ae_bool make_automatic) { - ae_vector_init(dst, src->cnt, src->datatype, state); + AE_CRITICAL_ASSERT(state!=NULL); + + ae_vector_init(dst, src->cnt, src->datatype, state, make_automatic); if( src->cnt!=0 ) - memcpy(dst->ptr.p_ptr, src->ptr.p_ptr, (size_t)(src->cnt*ae_sizeof(src->datatype))); + memmove(dst->ptr.p_ptr, src->ptr.p_ptr, (size_t)(src->cnt*ae_sizeof(src->datatype))); } /************************************************************************ @@ -709,27 +1366,26 @@ structures (source and destination) remain completely independent after this call. -dst destination matrix +dst destination vector, MUST be zero-filled (we check it + and call abort() if *dst is non-zero; the rationale is + that we can not correctly handle errors in constructors + without zero-filling). 
src well, it is source -state this parameter can be: - * pointer to current instance of ae_state, if you want - to automatically destroy this object after leaving - current frame - * NULL, if you do NOT want this vector to be - automatically managed (say, if it is field of some - object) - -Error handling: -* on failure calls ae_break() with NULL state pointer. Usually it results - in abort() call. +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, vector will be registered in the current frame + of the state structure; dst is assumed to be uninitialized, its fields are ignored. ************************************************************************/ -void ae_vector_init_from_x(ae_vector *dst, x_vector *src, ae_state *state) +void ae_vector_init_from_x(ae_vector *dst, x_vector *src, ae_state *state, ae_bool make_automatic) { - ae_vector_init(dst, (ae_int_t)src->cnt, (ae_datatype)src->datatype, state); + AE_CRITICAL_ASSERT(state!=NULL); + + ae_vector_init(dst, (ae_int_t)src->cnt, (ae_datatype)src->datatype, state, make_automatic); if( src->cnt>0 ) - memcpy(dst->ptr.p_ptr, src->ptr, (size_t)(((ae_int_t)src->cnt)*ae_sizeof((ae_datatype)src->datatype))); + memmove(dst->ptr.p_ptr, src->x_ptr.p_ptr, (size_t)(((ae_int_t)src->cnt)*ae_sizeof((ae_datatype)src->datatype))); } /************************************************************************ @@ -748,39 +1404,39 @@ dst destination vector src well, it is source -state this parameter can be: - * pointer to current instance of ae_state, if you want - to automatically destroy this object after leaving - current frame - * NULL, if you do NOT want this vector to be - automatically managed (say, if it is field of some - object) - -Error handling: -* on failure calls ae_break() with NULL state pointer. Usually it results - in abort() call. +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, vector will be registered in the current frame + of the state structure; dst is assumed to be uninitialized, its fields are ignored. 
************************************************************************/ -void ae_vector_attach_to_x(ae_vector *dst, x_vector *src, ae_state *state) +void ae_vector_init_attach_to_x(ae_vector *dst, x_vector *src, ae_state *state, ae_bool make_automatic) { volatile ae_int_t cnt; + AE_CRITICAL_ASSERT(state!=NULL); + AE_CRITICAL_ASSERT(ae_check_zeros(dst,sizeof(*dst))); + cnt = (ae_int_t)src->cnt; /* ensure that size is correct */ - ae_assert(cnt==src->cnt, "ae_vector_attach_to_x(): 32/64 overflow", NULL); - ae_assert(cnt>=0, "ae_vector_attach_to_x(): negative length", NULL); + ae_assert(cnt==src->cnt, "ae_vector_init_attach_to_x(): 32/64 overflow", state); + ae_assert(cnt>=0, "ae_vector_init_attach_to_x(): negative length", state); + + /* prepare for possible errors during allocation */ + dst->cnt = 0; + dst->ptr.p_ptr = NULL; + dst->datatype = (ae_datatype)src->datatype; + + /* zero-size init in order to correctly register in the frame */ + ae_db_init(&dst->data, 0, state, make_automatic); /* init */ dst->cnt = cnt; - dst->datatype = (ae_datatype)src->datatype; - dst->ptr.p_ptr = src->ptr; + dst->ptr.p_ptr = src->x_ptr.p_ptr; dst->is_attached = ae_true; - ae_assert( - ae_db_malloc(&dst->data, 0, state, state!=NULL), - "ae_vector_attach_to_x(): malloc error", - NULL); } /************************************************************************ @@ -788,35 +1444,56 @@ dst destination vector newsize vector size, may be zero -state ALGLIB environment state +state ALGLIB environment state, can not be NULL -Error handling: -* if state is NULL, returns ae_false on allocation error -* if state is not NULL, calls ae_break() on allocation error -* returns ae_true on success +Error handling: calls ae_break() on allocation error NOTES: * vector must be initialized * all contents is destroyed during setlength() call * new size may be zero. ************************************************************************/ -ae_bool ae_vector_set_length(ae_vector *dst, ae_int_t newsize, ae_state *state) +void ae_vector_set_length(ae_vector *dst, ae_int_t newsize, ae_state *state) { - /* ensure that size is >=0 - two ways to exit: 1) through ae_assert, if we have non-NULL state, 2) by returning ae_false */ - if( state!=NULL ) - ae_assert(newsize>=0, "ae_vector_set_length(): negative size", state); - if( newsize<0 ) - return ae_false; - - /* set length */ + AE_CRITICAL_ASSERT(state!=NULL); + ae_assert(newsize>=0, "ae_vector_set_length(): negative size", state); if( dst->cnt==newsize ) - return ae_true; + return; + + /* realloc, being ready for exception during reallocation (cnt=ptr=0 on entry) */ + dst->cnt = 0; + dst->ptr.p_ptr = NULL; + ae_db_realloc(&dst->data, newsize*ae_sizeof(dst->datatype), state); dst->cnt = newsize; - if( !ae_db_realloc(&dst->data, newsize*ae_sizeof(dst->datatype), state) ) - return ae_false; dst->ptr.p_ptr = dst->data.ptr; - return ae_true; +} + +/************************************************************************ +This function resized ae_vector, preserving previously existing elements. +Values of elements added during vector growth is undefined. + +dst destination vector +newsize vector size, may be zero +state ALGLIB environment state, can not be NULL + +Error handling: calls ae_break() on allocation error + +NOTES: +* vector must be initialized +* new size may be zero. 
+************************************************************************/ +void ae_vector_resize(ae_vector *dst, ae_int_t newsize, ae_state *state) +{ + ae_vector tmp; + ae_int_t bytes_total; + + memset(&tmp, 0, sizeof(tmp)); + ae_vector_init(&tmp, newsize, dst->datatype, state, ae_false); + bytes_total = (dst->cntcnt : newsize)*ae_sizeof(dst->datatype); + if( bytes_total>0 ) + memmove(tmp.ptr.p_ptr, dst->ptr.p_ptr, bytes_total); + ae_swap_vectors(dst, &tmp); + ae_vector_clear(&tmp); } @@ -888,80 +1565,80 @@ Matrix size may be zero, in such cases both rows and cols are zero. Matrix contents is uninitialized. -dst destination matrix, assumed to be unitialized, its - fields are ignored +dst destination matrix, must be zero-filled rows rows count cols cols count datatype element type -state depending on your desire to register matrix in the - current frame: - * pointer to ALGLIB environment state, if you want the - matrix to be automatically managed - * NULL, if you do not want it to be automatically - managed - -Error handling: -* calls ae_break() with NULL state; usually it results in abort() call. +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, matrix will be registered in the current frame + of the state structure; dst is assumed to be uninitialized, its fields are ignored. + +NOTE: no memory allocation is performed for initialization with rows=cols=0 ************************************************************************/ -void ae_matrix_init(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_datatype datatype, ae_state *state) +void ae_matrix_init(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_datatype datatype, ae_state *state, ae_bool make_automatic) { - ae_assert(rows>=0 && cols>=0, "ae_matrix_init(): negative length", NULL); + AE_CRITICAL_ASSERT(state!=NULL); + AE_CRITICAL_ASSERT(ae_check_zeros(dst,sizeof(*dst))); + + ae_assert(rows>=0 && cols>=0, "ae_matrix_init(): negative length", state); - /* if one of rows/cols is zero, another MUST be too */ + /* if one of rows/cols is zero, another MUST be too; perform quick exit */ if( rows==0 || cols==0 ) { - rows = 0; - cols = 0; + dst->rows = 0; + dst->cols = 0; + dst->is_attached = ae_false; + dst->ptr.pp_void = NULL; + dst->stride = 0; + dst->datatype = datatype; + ae_db_init(&dst->data, 0, state, make_automatic); + return; } - /* init */ + /* init, being ready for exception during allocation (rows=cols=ptr=NULL on entry) */ dst->is_attached = ae_false; - dst->rows = rows; - dst->cols = cols; + dst->rows = 0; + dst->cols = 0; + dst->ptr.pp_void = NULL; dst->stride = cols; while( dst->stride*ae_sizeof(datatype)%AE_DATA_ALIGN!=0 ) dst->stride++; dst->datatype = datatype; - ae_assert( - ae_db_malloc(&dst->data, dst->rows*((ae_int_t)sizeof(void*)+dst->stride*ae_sizeof(datatype))+AE_DATA_ALIGN-1, state, state!=NULL), /* TODO: change ae_db_malloc() */ - "ae_matrix_init(): failed to allocate memory", - NULL); - ae_matrix_update_row_pointers(dst, ae_align((char*)dst->data.ptr+dst->rows*sizeof(void*),AE_DATA_ALIGN)); + ae_db_init(&dst->data, rows*((ae_int_t)sizeof(void*)+dst->stride*ae_sizeof(datatype))+AE_DATA_ALIGN-1, state, make_automatic); + dst->rows = rows; + dst->cols = cols; + ae_matrix_update_row_pointers(dst, ae_align((char*)dst->data.ptr+rows*sizeof(void*),AE_DATA_ALIGN)); } /************************************************************************ This function creates copy of ae_matrix. 
A new copy of the data is created. -dst destination matrix +dst destination matrix, must be zero-filled src well, it is source -state this parameter can be: - * pointer to current instance of ae_state, if you want - to automatically destroy this object after leaving - current frame - * NULL, if you do NOT want this vector to be - automatically managed (say, if it is field of some - object) - -Error handling: -* on failure calls ae_break() with NULL state pointer. Usually it results - in abort() call. +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, matrix will be registered in the current frame + of the state structure; dst is assumed to be uninitialized, its fields are ignored. ************************************************************************/ -void ae_matrix_init_copy(ae_matrix *dst, ae_matrix *src, ae_state *state) +void ae_matrix_init_copy(ae_matrix *dst, ae_matrix *src, ae_state *state, ae_bool make_automatic) { ae_int_t i; - ae_matrix_init(dst, src->rows, src->cols, src->datatype, state); + ae_matrix_init(dst, src->rows, src->cols, src->datatype, state, make_automatic); if( src->rows!=0 && src->cols!=0 ) { if( dst->stride==src->stride ) - memcpy(dst->ptr.pp_void[0], src->ptr.pp_void[0], (size_t)(src->rows*src->stride*ae_sizeof(src->datatype))); + memmove(dst->ptr.pp_void[0], src->ptr.pp_void[0], (size_t)(src->rows*src->stride*ae_sizeof(src->datatype))); else for(i=0; irows; i++) - memcpy(dst->ptr.pp_void[i], src->ptr.pp_void[i], (size_t)(dst->cols*ae_sizeof(dst->datatype))); + memmove(dst->ptr.pp_void[i], src->ptr.pp_void[i], (size_t)(dst->cols*ae_sizeof(dst->datatype))); } } @@ -972,36 +1649,31 @@ structures (source and destination) remain completely independent after this call. -dst destination matrix +dst destination matrix, must be zero-filled src well, it is source -state this parameter can be: - * pointer to current instance of ae_state, if you want - to automatically destroy this object after leaving - current frame - * NULL, if you do NOT want this vector to be - automatically managed (say, if it is field of some - object) - -Error handling: -* on failure calls ae_break() with NULL state pointer. Usually it results - in abort() call. +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, matrix will be registered in the current frame + of the state structure; dst is assumed to be uninitialized, its fields are ignored. 
************************************************************************/ -void ae_matrix_init_from_x(ae_matrix *dst, x_matrix *src, ae_state *state) +void ae_matrix_init_from_x(ae_matrix *dst, x_matrix *src, ae_state *state, ae_bool make_automatic) { char *p_src_row; char *p_dst_row; ae_int_t row_size; ae_int_t i; - ae_matrix_init(dst, (ae_int_t)src->rows, (ae_int_t)src->cols, (ae_datatype)src->datatype, state); + AE_CRITICAL_ASSERT(state!=NULL); + ae_matrix_init(dst, (ae_int_t)src->rows, (ae_int_t)src->cols, (ae_datatype)src->datatype, state, make_automatic); if( src->rows!=0 && src->cols!=0 ) { - p_src_row = (char*)src->ptr; + p_src_row = (char*)src->x_ptr.p_ptr; p_dst_row = (char*)(dst->ptr.pp_void[0]); row_size = ae_sizeof((ae_datatype)src->datatype)*(ae_int_t)src->cols; for(i=0; irows; i++, p_src_row+=src->stride*ae_sizeof((ae_datatype)src->datatype), p_dst_row+=dst->stride*ae_sizeof((ae_datatype)src->datatype)) - memcpy(p_dst_row, p_src_row, (size_t)(row_size)); + memmove(p_dst_row, p_src_row, (size_t)(row_size)); } } @@ -1017,33 +1689,33 @@ remains untouched * SRC, however, CAN NOT BE REALLOCATED AS LONG AS DST EXISTS -dst destination matrix +dst destination matrix, must be zero-filled src well, it is source -state this parameter can be: - * pointer to current instance of ae_state, if you want - to automatically destroy this object after leaving - current frame - * NULL, if you do NOT want this vector to be - automatically managed (say, if it is field of some - object) - -Error handling: -* on failure calls ae_break() with NULL state pointer. Usually it results - in abort() call. +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, matrix will be registered in the current frame + of the state structure; dst is assumed to be uninitialized, its fields are ignored. 
************************************************************************/ -void ae_matrix_attach_to_x(ae_matrix *dst, x_matrix *src, ae_state *state) +void ae_matrix_init_attach_to_x(ae_matrix *dst, x_matrix *src, ae_state *state, ae_bool make_automatic) { ae_int_t rows, cols; + AE_CRITICAL_ASSERT(state!=NULL); + AE_CRITICAL_ASSERT(ae_check_zeros(dst,sizeof(*dst))); + rows = (ae_int_t)src->rows; cols = (ae_int_t)src->cols; + /* check that X-source is densely packed */ + ae_assert(src->cols==src->stride, "ae_matrix_init_attach_to_x(): unsupported stride", state); + /* ensure that size is correct */ - ae_assert(rows==src->rows, "ae_matrix_attach_to_x(): 32/64 overflow", NULL); - ae_assert(cols==src->cols, "ae_matrix_attach_to_x(): 32/64 overflow", NULL); - ae_assert(rows>=0 && cols>=0, "ae_matrix_attach_to_x(): negative length", NULL); + ae_assert(rows==src->rows, "ae_matrix_init_attach_to_x(): 32/64 overflow", state); + ae_assert(cols==src->cols, "ae_matrix_init_attach_to_x(): 32/64 overflow", state); + ae_assert(rows>=0 && cols>=0, "ae_matrix_init_attach_to_x(): negative length", state); /* if one of rows/cols is zero, another MUST be too */ if( rows==0 || cols==0 ) @@ -1052,24 +1724,23 @@ cols = 0; } - /* init */ + /* init, being ready for allocation error */ dst->is_attached = ae_true; - dst->rows = rows; - dst->cols = cols; + dst->rows = 0; + dst->cols = 0; dst->stride = cols; dst->datatype = (ae_datatype)src->datatype; dst->ptr.pp_void = NULL; - ae_assert( - ae_db_malloc(&dst->data, dst->rows*(ae_int_t)sizeof(void*), state, state!=NULL), - "ae_matrix_attach_to_x(): malloc error", - NULL); + ae_db_init(&dst->data, rows*(ae_int_t)sizeof(void*), state, make_automatic); + dst->rows = rows; + dst->cols = cols; if( dst->rows>0 && dst->cols>0 ) { ae_int_t i, rowsize; char *p_row; void **pp_ptr; - p_row = (char*)src->ptr; + p_row = (char*)src->x_ptr.p_ptr; rowsize = dst->stride*ae_sizeof(dst->datatype); pp_ptr = (void**)dst->data.ptr; dst->ptr.pp_void = pp_ptr; @@ -1097,26 +1768,28 @@ * all contents is destroyed during setlength() call * new size may be zero. 
************************************************************************/ -ae_bool ae_matrix_set_length(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_state *state) +void ae_matrix_set_length(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_state *state) { - /* ensure that size is >=0 - two ways to exit: 1) through ae_assert, if we have non-NULL state, 2) by returning ae_false */ - if( state!=NULL ) - ae_assert(rows>=0 && cols>=0, "ae_matrix_set_length(): negative length", state); - if( rows<0 || cols<0 ) - return ae_false; - + AE_CRITICAL_ASSERT(state!=NULL); + ae_assert(rows>=0 && cols>=0, "ae_matrix_set_length(): negative length", state); if( dst->rows==rows && dst->cols==cols ) - return ae_true; - dst->rows = rows; - dst->cols = cols; + return; + + /* prepare stride */ dst->stride = cols; while( dst->stride*ae_sizeof(dst->datatype)%AE_DATA_ALIGN!=0 ) dst->stride++; - if( !ae_db_realloc(&dst->data, dst->rows*((ae_int_t)sizeof(void*)+dst->stride*ae_sizeof(dst->datatype))+AE_DATA_ALIGN-1, state) ) - return ae_false; + + /* realloc, being ready for an exception during reallocation (rows=cols=0 on entry) */ + dst->rows = 0; + dst->cols = 0; + dst->ptr.pp_void = NULL; + ae_db_realloc(&dst->data, rows*((ae_int_t)sizeof(void*)+dst->stride*ae_sizeof(dst->datatype))+AE_DATA_ALIGN-1, state); + dst->rows = rows; + dst->cols = cols; + + /* update pointers to rows */ ae_matrix_update_row_pointers(dst, ae_align((char*)dst->data.ptr+dst->rows*sizeof(void*),AE_DATA_ALIGN)); - return ae_true; } @@ -1199,18 +1872,15 @@ /************************************************************************ This function creates smart pointer structure. -dst destination smart pointer. - already allocated, but not initialized. +dst destination smart pointer, must be zero-filled subscriber pointer to pointer which receives updates in the internal object stored in ae_smart_ptr. Any update to dst->ptr is translated to subscriber. Can be NULL. -state this parameter can be: - * pointer to current instance of ae_state, if you want - to automatically destroy this object after leaving - current frame - * NULL, if you do NOT want this vector to be - automatically managed (say, if it is field of some - object) +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, pointer will be registered in the current frame + of the state structure; Error handling: * on failure calls ae_break() with NULL state pointer. Usually it results @@ -1218,8 +1888,10 @@ After initialization, smart pointer stores NULL pointer. 
************************************************************************/ -void ae_smart_ptr_init(ae_smart_ptr *dst, void **subscriber, ae_state *state) +void ae_smart_ptr_init(ae_smart_ptr *dst, void **subscriber, ae_state *state, ae_bool make_automatic) { + AE_CRITICAL_ASSERT(state!=NULL); + AE_CRITICAL_ASSERT(ae_check_zeros(dst,sizeof(*dst))); dst->subscriber = subscriber; dst->ptr = NULL; if( dst->subscriber!=NULL ) @@ -1228,7 +1900,7 @@ dst->is_dynamic = ae_false; dst->frame_entry.deallocator = ae_smart_ptr_destroy; dst->frame_entry.ptr = dst; - if( state!=NULL ) + if( make_automatic ) ae_db_attach(&dst->frame_entry, state); } @@ -1293,7 +1965,11 @@ void ae_smart_ptr_assign(ae_smart_ptr *dst, void *new_ptr, ae_bool is_owner, ae_bool is_dynamic, void (*destroy)(void*)) { if( dst->is_owner && dst->ptr!=NULL ) + { dst->destroy(dst->ptr); + if( dst->is_dynamic ) + ae_free(dst->ptr); + } if( new_ptr!=NULL ) { dst->ptr = new_ptr; @@ -1362,7 +2038,7 @@ ************************************************************************/ void ae_x_set_vector(x_vector *dst, ae_vector *src, ae_state *state) { - if( src->ptr.p_ptr == dst->ptr ) + if( src->ptr.p_ptr == dst->x_ptr.p_ptr ) { /* src->ptr points to the beginning of dst, attached matrices, no need to copy */ return; @@ -1370,9 +2046,9 @@ if( dst->cnt!=src->cnt || dst->datatype!=src->datatype ) { if( dst->owner==OWN_AE ) - ae_free(dst->ptr); - dst->ptr = ae_malloc((size_t)(src->cnt*ae_sizeof(src->datatype)), state); - if( src->cnt!=0 && dst->ptr==NULL ) + ae_free(dst->x_ptr.p_ptr); + dst->x_ptr.p_ptr = ae_malloc((size_t)(src->cnt*ae_sizeof(src->datatype)), state); + if( src->cnt!=0 && dst->x_ptr.p_ptr==NULL ) ae_break(state, ERR_OUT_OF_MEMORY, "ae_malloc(): out of memory"); dst->last_action = ACT_NEW_LOCATION; dst->cnt = src->cnt; @@ -1391,7 +2067,7 @@ ae_assert(ae_false, "ALGLIB: internal error in ae_x_set_vector()", state); } if( src->cnt ) - memcpy(dst->ptr, src->ptr.p_ptr, (size_t)(src->cnt*ae_sizeof(src->datatype))); + memmove(dst->x_ptr.p_ptr, src->ptr.p_ptr, (size_t)(src->cnt*ae_sizeof(src->datatype))); } /************************************************************************ @@ -1426,7 +2102,7 @@ char *p_dst_row; ae_int_t i; ae_int_t row_size; - if( src->ptr.pp_void!=NULL && src->ptr.pp_void[0] == dst->ptr ) + if( src->ptr.pp_void!=NULL && src->ptr.pp_void[0] == dst->x_ptr.p_ptr ) { /* src->ptr points to the beginning of dst, attached matrices, no need to copy */ return; @@ -1434,13 +2110,13 @@ if( dst->rows!=src->rows || dst->cols!=src->cols || dst->datatype!=src->datatype ) { if( dst->owner==OWN_AE ) - ae_free(dst->ptr); + ae_free(dst->x_ptr.p_ptr); dst->rows = src->rows; dst->cols = src->cols; dst->stride = src->cols; dst->datatype = src->datatype; - dst->ptr = ae_malloc((size_t)(dst->rows*((ae_int_t)dst->stride)*ae_sizeof(src->datatype)), state); - if( dst->rows!=0 && dst->stride!=0 && dst->ptr==NULL ) + dst->x_ptr.p_ptr = ae_malloc((size_t)(dst->rows*((ae_int_t)dst->stride)*ae_sizeof(src->datatype)), state); + if( dst->rows!=0 && dst->stride!=0 && dst->x_ptr.p_ptr==NULL ) ae_break(state, ERR_OUT_OF_MEMORY, "ae_malloc(): out of memory"); dst->last_action = ACT_NEW_LOCATION; dst->owner = OWN_AE; @@ -1459,10 +2135,10 @@ if( src->rows!=0 && src->cols!=0 ) { p_src_row = (char*)(src->ptr.pp_void[0]); - p_dst_row = (char*)dst->ptr; + p_dst_row = (char*)dst->x_ptr.p_ptr; row_size = ae_sizeof(src->datatype)*src->cols; for(i=0; irows; i++, p_src_row+=src->stride*ae_sizeof(src->datatype), p_dst_row+=dst->stride*ae_sizeof(src->datatype)) - 
memcpy(p_dst_row, p_src_row, (size_t)(row_size)); + memmove(p_dst_row, p_src_row, (size_t)(row_size)); } } @@ -1484,8 +2160,8 @@ void ae_x_attach_to_vector(x_vector *dst, ae_vector *src) { if( dst->owner==OWN_AE ) - ae_free(dst->ptr); - dst->ptr = src->ptr.p_ptr; + ae_free(dst->x_ptr.p_ptr); + dst->x_ptr.p_ptr = src->ptr.p_ptr; dst->last_action = ACT_NEW_LOCATION; dst->cnt = src->cnt; dst->datatype = src->datatype; @@ -1510,12 +2186,12 @@ void ae_x_attach_to_matrix(x_matrix *dst, ae_matrix *src) { if( dst->owner==OWN_AE ) - ae_free(dst->ptr); + ae_free(dst->x_ptr.p_ptr); dst->rows = src->rows; dst->cols = src->cols; dst->stride = src->stride; dst->datatype = src->datatype; - dst->ptr = &(src->ptr.pp_double[0][0]); + dst->x_ptr.p_ptr = &(src->ptr.pp_double[0][0]); dst->last_action = ACT_NEW_LOCATION; dst->owner = OWN_CALLER; } @@ -1529,8 +2205,8 @@ void x_vector_clear(x_vector *dst) { if( dst->owner==OWN_AE ) - aligned_free(dst->ptr); - dst->ptr = NULL; + aligned_free(dst->x_ptr.p_ptr); + dst->x_ptr.p_ptr = NULL; dst->cnt = 0; } @@ -1541,6 +2217,10 @@ removing all frames and deallocating registered dynamic data structure. For NULL state it just abort()'s program. + +IMPORTANT: this function ALWAYS evaluates its argument. It can not be + replaced by macro which does nothing. So, you may place actual + function calls at cond, and these will always be performed. ************************************************************************/ void ae_assert(ae_bool cond, const char *msg, ae_state *state) { @@ -1640,6 +2320,108 @@ } /************************************************************************ +Activates tracing to file + +IMPORTANT: this function is NOT thread-safe! Calling it from multiple + threads will result in undefined behavior. Calling it when + some thread calls ALGLIB functions may result in undefined + behavior. +************************************************************************/ +void ae_trace_file(const char *tags, const char *filename) +{ + /* + * clean up previous call + */ + if( alglib_fclose_trace ) + { + if( alglib_trace_file!=NULL ) + fclose(alglib_trace_file); + alglib_trace_file = NULL; + alglib_fclose_trace = ae_false; + } + + /* + * store ",tags," to buffer. Leading and trailing commas allow us + * to perform checks for various tags by simply calling strstr(). 
+ */ + memset(alglib_trace_tags, 0, ALGLIB_TRACE_BUFFER_LEN); + strcat(alglib_trace_tags, ","); + strncat(alglib_trace_tags, tags, ALGLIB_TRACE_TAGS_LEN); + strcat(alglib_trace_tags, ","); + for(int i=0; alglib_trace_tags[i]!=0; i++) + alglib_trace_tags[i] = tolower(alglib_trace_tags[i]); + + /* + * set up trace + */ + alglib_trace_type = ALGLIB_TRACE_FILE; + alglib_trace_file = fopen(filename, "ab"); + alglib_fclose_trace = ae_true; +} + +/************************************************************************ +Disables tracing +************************************************************************/ +void ae_trace_disable() +{ + alglib_trace_type = ALGLIB_TRACE_NONE; + if( alglib_fclose_trace ) + fclose(alglib_trace_file); + alglib_trace_file = NULL; + alglib_fclose_trace = ae_false; +} + +/************************************************************************ +Checks whether specific kind of tracing is enabled +************************************************************************/ +ae_bool ae_is_trace_enabled(const char *tag) +{ + char buf[ALGLIB_TRACE_BUFFER_LEN]; + + /* check global trace status */ + if( alglib_trace_type==ALGLIB_TRACE_NONE || alglib_trace_file==NULL ) + return ae_false; + + /* copy tag to buffer, lowercase it */ + memset(buf, 0, ALGLIB_TRACE_BUFFER_LEN); + strcat(buf, ","); + strncat(buf, tag, ALGLIB_TRACE_TAGS_LEN); + strcat(buf, "?"); + for(int i=0; buf[i]!=0; i++) + buf[i] = tolower(buf[i]); + + /* contains tag (followed by comma, which means exact match) */ + buf[strlen(buf)-1] = ','; + if( strstr(alglib_trace_tags,buf)!=NULL ) + return ae_true; + + /* contains tag (followed by dot, which means match with child) */ + buf[strlen(buf)-1] = '.'; + if( strstr(alglib_trace_tags,buf)!=NULL ) + return ae_true; + + /* nothing */ + return ae_false; +} + +void ae_trace(const char * printf_fmt, ...) 
+{ + /* check global trace status */ + if( alglib_trace_type==ALGLIB_TRACE_FILE && alglib_trace_file!=NULL ) + { + va_list args; + + /* fprintf() */ + va_start(args, printf_fmt); + vfprintf(alglib_trace_file, printf_fmt, args); + va_end(args); + + /* flush output */ + fflush(alglib_trace_file); + } +} + +/************************************************************************ Real math functions ************************************************************************/ ae_bool ae_fp_eq(double v1, double v2) @@ -2088,8 +2870,8 @@ double v; ae_int_t i, j; - p1 = (double*)(a->ptr)+offset0*a->stride+offset1; - p2 = (double*)(a->ptr)+offset1*a->stride+offset0; + p1 = (double*)(a->x_ptr.p_ptr)+offset0*a->stride+offset1; + p2 = (double*)(a->x_ptr.p_ptr)+offset1*a->stride+offset0; for(i=0; iptr)+offset*a->stride+offset; + p = (double*)(a->x_ptr.p_ptr)+offset*a->stride+offset; for(i=0; iptr)+offset0*a->stride+offset1; - p2 = (ae_complex*)(a->ptr)+offset1*a->stride+offset0; + p1 = (ae_complex*)(a->x_ptr.p_ptr)+offset0*a->stride+offset1; + p2 = (ae_complex*)(a->x_ptr.p_ptr)+offset1*a->stride+offset0; for(i=0; iptr)+offset*a->stride+offset; + p = (ae_complex*)(a->x_ptr.p_ptr)+offset*a->stride+offset; for(i=0; iptr)+offset0*a->stride+offset1; - p2 = (double*)(a->ptr)+offset1*a->stride+offset0; + p1 = (double*)(a->x_ptr.p_ptr)+offset0*a->stride+offset1; + p2 = (double*)(a->x_ptr.p_ptr)+offset1*a->stride+offset0; for(i=0; iptr)+offset*a->stride+offset; + p = (double*)(a->x_ptr.p_ptr)+offset*a->stride+offset; for(i=0; iptr)+offset0*a->stride+offset1; - p2 = (ae_complex*)(a->ptr)+offset1*a->stride+offset0; + p1 = (ae_complex*)(a->x_ptr.p_ptr)+offset0*a->stride+offset1; + p2 = (ae_complex*)(a->x_ptr.p_ptr)+offset1*a->stride+offset0; for(i=0; iptr)+offset*a->stride+offset; + p = (ae_complex*)(a->x_ptr.p_ptr)+offset*a->stride+offset; for(i=0; iendianness==AE_BIG_ENDIAN ) + { + for(i=0; i<(ae_int_t)(sizeof(ae_int_t)/2); i++) + { + unsigned char tc; + tc = bytes[i]; + bytes[i] = bytes[sizeof(ae_int_t)-1-i]; + bytes[sizeof(ae_int_t)-1-i] = tc; + } + } + + /* + * convert to six-bit representation, output + * + * NOTE: last 12th element of sixbits is always zero, we do not output it + */ + ae_threebytes2foursixbits(bytes+0, sixbits+0); + ae_threebytes2foursixbits(bytes+3, sixbits+4); + ae_threebytes2foursixbits(bytes+6, sixbits+8); + for(i=0; iendianness==AE_BIG_ENDIAN ) + { + for(i=0; i<(ae_int_t)(sizeof(ae_int_t)/2); i++) + { + unsigned char tc; + tc = u.bytes[i]; + u.bytes[i] = u.bytes[sizeof(ae_int_t)-1-i]; + u.bytes[sizeof(ae_int_t)-1-i] = tc; + } + } + return u.ival; +} + +/************************************************************************ +This function unserializes 64-bit integer value from string + +buf buffer which contains value; leading spaces/tabs/newlines are + ignored, traling spaces/tabs/newlines are treated as end of + the boolean value. +state ALGLIB environment state + +This function raises an error in case unexpected symbol is found +************************************************************************/ +ae_int64_t ae_str2int64(const char *buf, ae_state *state, const char **pasttheend) +{ + const char *emsg = "ALGLIB: unable to read integer value from stream"; + ae_int_t sixbits[12]; + ae_int_t sixbitsread, i; + unsigned char bytes[9]; + ae_int64_t result; + + /* + * 1. skip leading spaces + * 2. read and decode six-bit digits + * 3. set trailing digits to zeros + * 4. convert to little endian 64-bit integer representation + * 5. 
convert to big endian representation, if needed + */ + while( *buf==' ' || *buf=='\t' || *buf=='\n' || *buf=='\r' ) + buf++; + sixbitsread = 0; + while( *buf!=' ' && *buf!='\t' && *buf!='\n' && *buf!='\r' && *buf!=0 ) + { + ae_int_t d; + d = ae_char2sixbits(*buf); + if( d<0 || sixbitsread>=AE_SER_ENTRY_LENGTH ) + ae_break(state, ERR_ASSERTION_FAILED, emsg); + sixbits[sixbitsread] = d; + sixbitsread++; + buf++; + } + *pasttheend = buf; + if( sixbitsread==0 ) + ae_break(state, ERR_ASSERTION_FAILED, emsg); + for(i=sixbitsread; i<12; i++) + sixbits[i] = 0; + ae_foursixbits2threebytes(sixbits+0, bytes+0); + ae_foursixbits2threebytes(sixbits+4, bytes+3); + ae_foursixbits2threebytes(sixbits+8, bytes+6); if( state->endianness==AE_BIG_ENDIAN ) { for(i=0; i<(ae_int_t)(sizeof(ae_int_t)/2); i++) { unsigned char tc; - tc = u.bytes[i]; - u.bytes[i] = u.bytes[sizeof(ae_int_t)-1-i]; - u.bytes[sizeof(ae_int_t)-1-i] = tc; + tc = bytes[i]; + bytes[i] = bytes[sizeof(ae_int_t)-1-i]; + bytes[sizeof(ae_int_t)-1-i] = tc; } } - return u.ival; + memmove(&result, bytes, sizeof(result)); + return result; } @@ -2881,19 +3777,19 @@ if( ae_isnan(v, state) ) { const char *s = ".nan_______"; - memcpy(buf, s, strlen(s)+1); + memmove(buf, s, strlen(s)+1); return; } if( ae_isposinf(v, state) ) { const char *s = ".posinf____"; - memcpy(buf, s, strlen(s)+1); + memmove(buf, s, strlen(s)+1); return; } if( ae_isneginf(v, state) ) { const char *s = ".neginf____"; - memcpy(buf, s, strlen(s)+1); + memmove(buf, s, strlen(s)+1); return; } @@ -3062,14 +3958,15 @@ } /************************************************************************ -This function initializes ae_lock structure and sets lock in a free mode. +This function initializes _lock structure which is internally used by +ae_lock high-level structure. + +_lock structure is statically allocated, no malloc() calls is performed +during its allocation. However, you have to call _ae_free_lock_raw() in +order to deallocate this lock properly. ************************************************************************/ -void ae_init_lock(ae_lock *lock) +void _ae_init_lock_raw(_lock *p) { - _lock *p; - lock->ptr = malloc(sizeof(_lock)); - AE_CRITICAL_ASSERT(lock->ptr!=NULL); - p = (_lock*)lock->ptr; #if AE_OS==AE_WINDOWS p->p_lock = (ae_int_t*)ae_align((void*)(&p->buf),AE_LOCK_ALIGNMENT); p->p_lock[0] = 0; @@ -3082,17 +3979,14 @@ /************************************************************************ -This function acquires lock. In case lock is busy, we perform several -iterations inside tight loop before trying again. +This function acquires _lock structure. + +It is low-level workhorse utilized by ae_acquire_lock(). ************************************************************************/ -void ae_acquire_lock(ae_lock *lock) +void _ae_acquire_lock_raw(_lock *p) { #if AE_OS==AE_WINDOWS ae_int_t cnt = 0; -#endif - _lock *p; - p = (_lock*)lock->ptr; -#if AE_OS==AE_WINDOWS #ifdef AE_SMP_DEBUGCOUNTERS InterlockedIncrement((LONG volatile *)&_ae_dbg_lock_acquisitions); #endif @@ -3133,12 +4027,12 @@ /************************************************************************ -This function releases lock. +This function releases _lock structure. + +It is low-level lock function which is used by ae_release_lock. 
************************************************************************/ -void ae_release_lock(ae_lock *lock) +void _ae_release_lock_raw(_lock *p) { -    _lock *p; -    p = (_lock*)lock->ptr; #if AE_OS==AE_WINDOWS InterlockedExchange((LONG volatile *)p->p_lock, 0); #elif AE_OS==AE_POSIX @@ -3150,31 +4044,124 @@ /************************************************************************ -This function frees ae_lock structure. +This function frees _lock structure. ************************************************************************/ -void ae_free_lock(ae_lock *lock) +void _ae_free_lock_raw(_lock *p) { -    _lock *p; -    p = (_lock*)lock->ptr; #if AE_OS==AE_POSIX pthread_mutex_destroy(&p->mutex); #endif -    free(p); +} + + +/************************************************************************ +This function initializes ae_lock structure. + +INPUT PARAMETERS: +    lock                -   pointer to lock structure, must be zero-filled +    state               -   pointer to state structure, used for exception +                            handling and management of automatic objects. +    make_automatic      -   if true, lock object is added to automatic +                            memory management list. + +NOTE: as a special exception, this function allows you to specify a NULL +      state pointer. In this case all exceptions arising during construction +      are handled as critical failures, with abort() being called. +      make_automatic must be false on such calls. +************************************************************************/ +void ae_init_lock(ae_lock *lock, ae_state *state, ae_bool make_automatic) +{ +    _lock *p; +    AE_CRITICAL_ASSERT(ae_check_zeros(lock,sizeof(*lock))); +    if(state==NULL) +    { +        ae_state _tmp_state; +        AE_CRITICAL_ASSERT(!make_automatic); +        ae_state_init(&_tmp_state); +        ae_init_lock(lock, &_tmp_state, ae_false); +        ae_state_clear(&_tmp_state); +        return; +    } +    lock->eternal = ae_false; +    ae_db_init(&lock->db, sizeof(_lock), state, make_automatic); +    lock->lock_ptr = lock->db.ptr; +    p = (_lock*)lock->lock_ptr; +    _ae_init_lock_raw(p); +} + +/************************************************************************ +This function initializes "eternal" ae_lock structure which is expected +to persist until the end of the execution of the program. Eternal locks +cannot be deallocated (cleared) and do not increase debug allocation +counters. Errors during allocation of eternal locks are considered +critical exceptions and handled by calling abort(). + +INPUT PARAMETERS: +    lock                -   pointer to lock structure, must be zero-filled +************************************************************************/ +void ae_init_lock_eternal(ae_lock *lock) +{ +    _lock *p; +    AE_CRITICAL_ASSERT(ae_check_zeros(lock,sizeof(*lock))); +    lock->eternal = ae_true; +    lock->lock_ptr = eternal_malloc(sizeof(_lock)); +    p = (_lock*)lock->lock_ptr; +    _ae_init_lock_raw(p); +} + + +/************************************************************************ +This function acquires lock. In case lock is busy, we perform several +iterations inside a tight loop before trying again. +************************************************************************/ +void ae_acquire_lock(ae_lock *lock) +{ +    _lock *p; +    p = (_lock*)lock->lock_ptr; +    _ae_acquire_lock_raw(p); +} + + +/************************************************************************ +This function releases lock. 
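It is expected to be paired with ae_acquire_lock(). A minimal usage sketch
with the new zero-fill + ae_state based initialization (assuming a valid
ae_state object named state; error handling omitted):

    ae_lock lock;
    memset(&lock, 0, sizeof(lock));
    ae_init_lock(&lock, &state, ae_false);
    ae_acquire_lock(&lock);
    ...critical section...
    ae_release_lock(&lock);
    ae_free_lock(&lock);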
+************************************************************************/ +void ae_release_lock(ae_lock *lock) +{ + _lock *p; + p = (_lock*)lock->lock_ptr; + _ae_release_lock_raw(p); +} + + +/************************************************************************ +This function frees ae_lock structure. +************************************************************************/ +void ae_free_lock(ae_lock *lock) +{ + _lock *p; + AE_CRITICAL_ASSERT(!lock->eternal); + p = (_lock*)lock->lock_ptr; + if( p!=NULL ) + _ae_free_lock_raw(p); + ae_db_free(&lock->db); } /************************************************************************ This function creates ae_shared_pool structure. -dst destination shared pool; +dst destination shared pool, must be zero-filled already allocated, but not initialized. -state this parameter can be: - * pointer to current instance of ae_state, if you want - to automatically destroy this object after leaving - current frame - * NULL, if you do NOT want this vector to be - automatically managed (say, if it is field of some - object) +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, vector will be registered in the current frame + of the state structure; Error handling: * on failure calls ae_break() with NULL state pointer. Usually it results @@ -3182,11 +4169,13 @@ dst is assumed to be uninitialized, its fields are ignored. ************************************************************************/ -void ae_shared_pool_init(void *_dst, ae_state *state) +void ae_shared_pool_init(void *_dst, ae_state *state, ae_bool make_automatic) { ae_shared_pool *dst; + AE_CRITICAL_ASSERT(state!=NULL); dst = (ae_shared_pool*)_dst; + AE_CRITICAL_ASSERT(ae_check_zeros(dst,sizeof(*dst))); /* init */ dst->seed_object = NULL; @@ -3199,9 +4188,9 @@ dst->destroy = NULL; dst->frame_entry.deallocator = ae_shared_pool_destroy; dst->frame_entry.ptr = dst; - if( state!=NULL ) + if( make_automatic ) ae_db_attach(&dst->frame_entry, state); - ae_init_lock(&dst->pool_lock); + ae_init_lock(&dst->pool_lock, state, ae_false); } @@ -3248,26 +4237,20 @@ /************************************************************************ This function creates copy of ae_shared_pool. -dst destination pool, allocated but not initialized +dst destination pool, must be zero-filled src source pool -state this parameter can be: - * pointer to current instance of ae_state, if you want - to automatically destroy this object after leaving - current frame - * NULL, if you do NOT want this vector to be - automatically managed (say, if it is field of some - object) - -Error handling: -* on failure calls ae_break() with NULL state pointer. Usually it results - in abort() call. +state pointer to current state structure. Can not be NULL. + used for exception handling (say, allocation error results + in longjmp call). +make_automatic if true, vector will be registered in the current frame + of the state structure; dst is assumed to be uninitialized, its fields are ignored. NOTE: this function is NOT thread-safe. It does not acquire pool lock, so you should NOT call it when lock can be used by another thread. 
************************************************************************/ -void ae_shared_pool_init_copy(void *_dst, void *_src, ae_state *state) +void ae_shared_pool_init_copy(void *_dst, void *_src, ae_state *state, ae_bool make_automatic) { ae_shared_pool *dst, *src; ae_shared_pool_entry *ptr; @@ -3277,20 +4260,20 @@ dst = (ae_shared_pool*)_dst; src = (ae_shared_pool*)_src; - ae_shared_pool_init(dst, state); + ae_shared_pool_init(dst, state, make_automatic); /* copy non-pointer fields */ dst->size_of_object = src->size_of_object; dst->init = src->init; dst->init_copy = src->init_copy; dst->destroy = src->destroy; - ae_init_lock(&dst->pool_lock); /* copy seed object */ if( src->seed_object!=NULL ) { dst->seed_object = ae_malloc(dst->size_of_object, state); - dst->init_copy(dst->seed_object, src->seed_object, NULL); + memset(dst->seed_object, 0, dst->size_of_object); + dst->init_copy(dst->seed_object, src->seed_object, state, ae_false); } /* copy recycled objects */ @@ -3298,11 +4281,18 @@ for(ptr=src->recycled_objects; ptr!=NULL; ptr=(ae_shared_pool_entry*)ptr->next_entry) { ae_shared_pool_entry *tmp; + + /* allocate entry, immediately add to the recycled list + (we do not want to lose it in case of future malloc failures) */ tmp = (ae_shared_pool_entry*)ae_malloc(sizeof(ae_shared_pool_entry), state); - tmp->obj = ae_malloc(dst->size_of_object, state); - dst->init_copy(tmp->obj, ptr->obj, NULL); + memset(tmp, 0, sizeof(*tmp)); tmp->next_entry = dst->recycled_objects; dst->recycled_objects = tmp; + + /* prepare place for object, init_copy() it */ + tmp->obj = ae_malloc(dst->size_of_object, state); + memset(tmp->obj, 0, dst->size_of_object); + dst->init_copy(tmp->obj, ptr->obj, state, ae_false); } /* recycled entries are not copied because they do not store any information */ @@ -3318,13 +4308,10 @@ /************************************************************************ -This function clears contents of the pool, but pool remain usable. - -IMPORTANT: this function invalidates dst, it can not be used after it is - cleared. +This function performs destruction of the pool object. NOTE: this function is NOT thread-safe. It does not acquire pool lock, so - you should NOT call it when lock can be used by another thread. + you should NOT call it when pool can be used by another thread. ************************************************************************/ void ae_shared_pool_clear(void *_dst) { @@ -3344,14 +4331,6 @@ dst->destroy = NULL; } - -/************************************************************************ -This function destroys pool (object is left in invalid state, all -dynamically allocated memory is freed). - -NOTE: this function is NOT thread-safe. It does not acquire pool lock, so - you should NOT call it when lock can be used by another thread. 
-************************************************************************/ void ae_shared_pool_destroy(void *_dst) { ae_shared_pool *dst = (ae_shared_pool*)_dst; @@ -3395,8 +4374,8 @@ ae_shared_pool *dst, void *seed_object, ae_int_t size_of_object, - void (*init)(void* dst, ae_state* state), - void (*init_copy)(void* dst, void* src, ae_state* state), + void (*init)(void* dst, ae_state* state, ae_bool make_automatic), + void (*init_copy)(void* dst, void* src, ae_state* state, ae_bool make_automatic), void (*destroy)(void* ptr), ae_state *state) { @@ -3414,7 +4393,8 @@ /* set seed object */ dst->seed_object = ae_malloc(size_of_object, state); - init_copy(dst->seed_object, seed_object, NULL); + memset(dst->seed_object, 0, size_of_object); + init_copy(dst->seed_object, seed_object, state, ae_false); } @@ -3455,7 +4435,6 @@ /* try to reuse recycled objects */ if( pool->recycled_objects!=NULL ) { - void *new_obj; ae_shared_pool_entry *result; /* retrieve entry/object from list of recycled objects */ @@ -3479,12 +4458,14 @@ /* release lock; we do not need it anymore because copy constructor does not modify source variable */ ae_release_lock(&pool->pool_lock); - /* create new object from seed */ + /* create new object from seed, immediately assign object to smart pointer + (do not want to lose it in case of future failures) */ new_obj = ae_malloc(pool->size_of_object, state); - pool->init_copy(new_obj, pool->seed_object, NULL); - - /* assign object to smart pointer and return */ + memset(new_obj, 0, pool->size_of_object); ae_smart_ptr_assign(pptr, new_obj, ae_true, ae_true, pool->destroy); + + /* perform actual copying; before this line smartptr points to zero-filled instance */ + pool->init_copy(new_obj, pool->seed_object, state, ae_false); } @@ -3525,7 +4506,7 @@ /* acquire lock */ ae_acquire_lock(&pool->pool_lock); - /* acquire shared pool entry (reuse one from recycled_entries or malloc new one) */ + /* acquire shared pool entry (reuse one from recycled_entries or allocate new one) */ if( pool->recycled_entries!=NULL ) { /* reuse previously allocated entry */ @@ -3728,6 +4709,19 @@ serializer->entries_needed++; } +void ae_serializer_alloc_byte_array(ae_serializer *serializer, ae_vector *bytes) +{ + ae_int_t n; + n = bytes->cnt; + n = n/8 + (n%8>0 ? 1 : 0); + serializer->entries_needed += 1+n; +} + +/************************************************************************ +After allocation phase is done, this function returns required size of +the output string buffer (including trailing zero symbol). Actual size of +the data being stored can be a few characters smaller than requested. 
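A minimal sketch of the intended two-phase use for a string target, assuming
the usual ae_serializer_init()/ae_serializer_alloc_start() entry points from
this file and a valid ae_state object named state (error handling omitted;
one ae_serializer_alloc_entry() call is made per value to be stored):

    ae_serializer s;
    ae_int_t n;
    char *buf;
    ae_serializer_init(&s);
    ae_serializer_alloc_start(&s);
    ae_serializer_alloc_entry(&s);
    n = ae_serializer_get_alloc_size(&s);
    buf = (char*)ae_malloc((size_t)n, &state);
    ae_serializer_sstart_str(&s, buf);
    ae_serializer_serialize_int(&s, 42, &state);
    ae_serializer_stop(&s, &state);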
+************************************************************************/ ae_int_t ae_serializer_get_alloc_size(ae_serializer *serializer) { ae_int_t rows, lastrowsize, result; @@ -3737,7 +4731,7 @@ /* if no entries needed (degenerate case) */ if( serializer->entries_needed==0 ) { -        serializer->bytes_asked = 1; +        serializer->bytes_asked = 4; /* a pair of chars for \r\n, one for dot, one for trailing zero */ return serializer->bytes_asked; } @@ -3751,9 +4745,11 @@ } /* calculate result size */ -    result = ((rows-1)*AE_SER_ENTRIES_PER_ROW+lastrowsize)*AE_SER_ENTRY_LENGTH; -    result += (rows-1)*(AE_SER_ENTRIES_PER_ROW-1)+(lastrowsize-1); -    result += rows*2; +    result = ((rows-1)*AE_SER_ENTRIES_PER_ROW+lastrowsize)*AE_SER_ENTRY_LENGTH; /* data size */ +    result += (rows-1)*(AE_SER_ENTRIES_PER_ROW-1)+(lastrowsize-1); /* space symbols */ +    result += rows*2; /* newline symbols */ +    result += 1; /* trailing dot */ +    result += 1; /* trailing zero */ serializer->bytes_asked = result; return result; } @@ -3766,14 +4762,61 @@ serializer->entries_saved = 0; serializer->bytes_written = 0; } -#endif -#ifdef AE_USE_CPP_SERIALIZATION void ae_serializer_ustart_str(ae_serializer *serializer, const std::string *buf) { serializer->mode = AE_SM_FROM_STRING; serializer->in_str = buf->c_str(); } + +static char cpp_writer(const char *p_string, ae_int_t aux) +{ +    std::ostream *stream = reinterpret_cast<std::ostream*>(aux); +    stream->write(p_string, strlen(p_string)); +    return stream->bad() ? 1 : 0; +} + +static char cpp_reader(ae_int_t aux, ae_int_t cnt, char *p_buf) +{ +    std::istream *stream = reinterpret_cast<std::istream*>(aux); +    int c; +    if( cnt<=0 ) +        return 1; /* unexpected cnt */ +    for(;;) +    { +        c = stream->get(); +        if( c<0 || c>255 ) +            return 1; /* failure! */ +        if( c!=' ' && c!='\t' && c!='\n' && c!='\r' ) +            break; +    } +    p_buf[0] = (char)c; +    for(int k=1; k<cnt; k++) +    { +        c = stream->get(); +        if( c<0 || c>255 || c==' ' || c=='\t' || c=='\n' || c=='\r' ) +            return 1; /* failure! 
*/ + p_buf[k] = (char)c; + } + p_buf[cnt] = 0; + return 0; /* success */ +} + +void ae_serializer_sstart_stream(ae_serializer *serializer, std::ostream *stream) +{ + serializer->mode = AE_SM_TO_STREAM; + serializer->stream_writer = cpp_writer; + serializer->stream_aux = reinterpret_cast(stream); + serializer->entries_saved = 0; + serializer->bytes_written = 0; +} + +void ae_serializer_ustart_stream(ae_serializer *serializer, const std::istream *stream) +{ + serializer->mode = AE_SM_FROM_STREAM; + serializer->stream_reader = cpp_reader; + serializer->stream_aux = reinterpret_cast(stream); +} #endif void ae_serializer_sstart_str(ae_serializer *serializer, char *buf) @@ -3791,6 +4834,22 @@ serializer->in_str = buf; } +void ae_serializer_sstart_stream(ae_serializer *serializer, ae_stream_writer writer, ae_int_t aux) +{ + serializer->mode = AE_SM_TO_STREAM; + serializer->stream_writer = writer; + serializer->stream_aux = aux; + serializer->entries_saved = 0; + serializer->bytes_written = 0; +} + +void ae_serializer_ustart_stream(ae_serializer *serializer, ae_stream_reader reader, ae_int_t aux) +{ + serializer->mode = AE_SM_FROM_STREAM; + serializer->stream_reader = reader; + serializer->stream_aux = aux; +} + void ae_serializer_serialize_bool(ae_serializer *serializer, ae_bool v, ae_state *state) { char buf[AE_SER_ENTRY_LENGTH+2+1]; @@ -3805,8 +4864,7 @@ else strcat(buf, "\r\n"); bytes_appended = (ae_int_t)strlen(buf); - if( serializer->bytes_written+bytes_appended > serializer->bytes_asked ) - ae_break(state, ERR_ASSERTION_FAILED, emsg); + ae_assert(serializer->bytes_written+bytes_appendedbytes_asked, emsg, state); /* strict "less" because we need space for trailing zero */ serializer->bytes_written += bytes_appended; /* append to buffer */ @@ -3823,6 +4881,11 @@ serializer->out_str += bytes_appended; return; } + if( serializer->mode==AE_SM_TO_STREAM ) + { + ae_assert(serializer->stream_writer(buf, serializer->stream_aux)==0, "serializer: error writing to stream", state); + return; + } ae_break(state, ERR_ASSERTION_FAILED, emsg); } @@ -3840,8 +4903,46 @@ else strcat(buf, "\r\n"); bytes_appended = (ae_int_t)strlen(buf); - if( serializer->bytes_written+bytes_appended > serializer->bytes_asked ) - ae_break(state, ERR_ASSERTION_FAILED, emsg); + ae_assert(serializer->bytes_written+bytes_appendedbytes_asked, emsg, state); /* strict "less" because we need space for trailing zero */ + serializer->bytes_written += bytes_appended; + + /* append to buffer */ +#ifdef AE_USE_CPP_SERIALIZATION + if( serializer->mode==AE_SM_TO_CPPSTRING ) + { + *(serializer->out_cppstr) += buf; + return; + } +#endif + if( serializer->mode==AE_SM_TO_STRING ) + { + strcat(serializer->out_str, buf); + serializer->out_str += bytes_appended; + return; + } + if( serializer->mode==AE_SM_TO_STREAM ) + { + ae_assert(serializer->stream_writer(buf, serializer->stream_aux)==0, "serializer: error writing to stream", state); + return; + } + ae_break(state, ERR_ASSERTION_FAILED, emsg); +} + +void ae_serializer_serialize_int64(ae_serializer *serializer, ae_int64_t v, ae_state *state) +{ + char buf[AE_SER_ENTRY_LENGTH+2+1]; + const char *emsg = "ALGLIB: serialization integrity error"; + ae_int_t bytes_appended; + + /* prepare serialization, check consistency */ + ae_int642str(v, buf, state); + serializer->entries_saved++; + if( serializer->entries_saved%AE_SER_ENTRIES_PER_ROW ) + strcat(buf, " "); + else + strcat(buf, "\r\n"); + bytes_appended = (ae_int_t)strlen(buf); + ae_assert(serializer->bytes_written+bytes_appendedbytes_asked, emsg, 
state); /* strict "less" because we need space for trailing zero */ serializer->bytes_written += bytes_appended; /* append to buffer */ @@ -3858,6 +4959,11 @@ serializer->out_str += bytes_appended; return; } + if( serializer->mode==AE_SM_TO_STREAM ) + { + ae_assert(serializer->stream_writer(buf, serializer->stream_aux)==0, "serializer: error writing to stream", state); + return; + } ae_break(state, ERR_ASSERTION_FAILED, emsg); } @@ -3875,8 +4981,7 @@ else strcat(buf, "\r\n"); bytes_appended = (ae_int_t)strlen(buf); - if( serializer->bytes_written+bytes_appended > serializer->bytes_asked ) - ae_break(state, ERR_ASSERTION_FAILED, emsg); + ae_assert(serializer->bytes_written+bytes_appendedbytes_asked, emsg, state); /* strict "less" because we need space for trailing zero */ serializer->bytes_written += bytes_appended; /* append to buffer */ @@ -3893,26 +4998,180 @@ serializer->out_str += bytes_appended; return; } + if( serializer->mode==AE_SM_TO_STREAM ) + { + ae_assert(serializer->stream_writer(buf, serializer->stream_aux)==0, "serializer: error writing to stream", state); + return; + } ae_break(state, ERR_ASSERTION_FAILED, emsg); } +void ae_serializer_serialize_byte_array(ae_serializer *serializer, ae_vector *bytes, ae_state *state) +{ + ae_int_t chunk_size, entries_count; + + chunk_size = 8; + + /* save array length */ + ae_serializer_serialize_int(serializer, bytes->cnt, state); + + /* determine entries count */ + entries_count = bytes->cnt/chunk_size + (bytes->cnt%chunk_size>0 ? 1 : 0); + for(ae_int_t eidx=0; eidxcnt - eidx*chunk_size; + elen = elen>chunk_size ? chunk_size : elen; + memset(&tmpi, 0, sizeof(tmpi)); + memmove(&tmpi, bytes->ptr.p_ubyte + eidx*chunk_size, elen); + ae_serializer_serialize_int64(serializer, tmpi, state); + } +} + void ae_serializer_unserialize_bool(ae_serializer *serializer, ae_bool *v, ae_state *state) { - *v = ae_str2bool(serializer->in_str, state, &serializer->in_str); + if( serializer->mode==AE_SM_FROM_STRING ) + { + *v = ae_str2bool(serializer->in_str, state, &serializer->in_str); + return; + } + if( serializer->mode==AE_SM_FROM_STREAM ) + { + char buf[AE_SER_ENTRY_LENGTH+2+1]; + const char *p = buf; + ae_assert(serializer->stream_reader(serializer->stream_aux, AE_SER_ENTRY_LENGTH, buf)==0, "serializer: error reading from stream", state); + *v = ae_str2bool(buf, state, &p); + return; + } + ae_break(state, ERR_ASSERTION_FAILED, "ae_serializer: integrity check failed"); } void ae_serializer_unserialize_int(ae_serializer *serializer, ae_int_t *v, ae_state *state) { - *v = ae_str2int(serializer->in_str, state, &serializer->in_str); + if( serializer->mode==AE_SM_FROM_STRING ) + { + *v = ae_str2int(serializer->in_str, state, &serializer->in_str); + return; + } + if( serializer->mode==AE_SM_FROM_STREAM ) + { + char buf[AE_SER_ENTRY_LENGTH+2+1]; + const char *p = buf; + ae_assert(serializer->stream_reader(serializer->stream_aux, AE_SER_ENTRY_LENGTH, buf)==0, "serializer: error reading from stream", state); + *v = ae_str2int(buf, state, &p); + return; + } + ae_break(state, ERR_ASSERTION_FAILED, "ae_serializer: integrity check failed"); +} + +void ae_serializer_unserialize_int64(ae_serializer *serializer, ae_int64_t *v, ae_state *state) +{ + if( serializer->mode==AE_SM_FROM_STRING ) + { + *v = ae_str2int64(serializer->in_str, state, &serializer->in_str); + return; + } + if( serializer->mode==AE_SM_FROM_STREAM ) + { + char buf[AE_SER_ENTRY_LENGTH+2+1]; + const char *p = buf; + ae_assert(serializer->stream_reader(serializer->stream_aux, AE_SER_ENTRY_LENGTH, buf)==0, 
"serializer: error reading from stream", state); + *v = ae_str2int64(buf, state, &p); + return; + } + ae_break(state, ERR_ASSERTION_FAILED, "ae_serializer: integrity check failed"); } void ae_serializer_unserialize_double(ae_serializer *serializer, double *v, ae_state *state) { - *v = ae_str2double(serializer->in_str, state, &serializer->in_str); + if( serializer->mode==AE_SM_FROM_STRING ) + { + *v = ae_str2double(serializer->in_str, state, &serializer->in_str); + return; + } + if( serializer->mode==AE_SM_FROM_STREAM ) + { + char buf[AE_SER_ENTRY_LENGTH+2+1]; + const char *p = buf; + ae_assert(serializer->stream_reader(serializer->stream_aux, AE_SER_ENTRY_LENGTH, buf)==0, "serializer: error reading from stream", state); + *v = ae_str2double(buf, state, &p); + return; + } + ae_break(state, ERR_ASSERTION_FAILED, "ae_serializer: integrity check failed"); +} + +void ae_serializer_unserialize_byte_array(ae_serializer *serializer, ae_vector *bytes, ae_state *state) +{ + ae_int_t chunk_size, n, entries_count; + + chunk_size = 8; + + /* read array length, allocate output */ + ae_serializer_unserialize_int(serializer, &n, state); + ae_vector_set_length(bytes, n, state); + + /* determine entries count, read entries */ + entries_count = n/chunk_size + (n%chunk_size>0 ? 1 : 0); + for(ae_int_t eidx=0; eidxchunk_size ? chunk_size : elen; + ae_serializer_unserialize_int64(serializer, &tmp64, state); + memmove(bytes->ptr.p_ubyte+eidx*chunk_size, &tmp64, elen); + } } -void ae_serializer_stop(ae_serializer *serializer) +void ae_serializer_stop(ae_serializer *serializer, ae_state *state) { +#ifdef AE_USE_CPP_SERIALIZATION + if( serializer->mode==AE_SM_TO_CPPSTRING ) + { + ae_assert(serializer->bytes_written+1bytes_asked, "ae_serializer: integrity check failed", state);/* strict "less" because we need space for trailing zero */ + serializer->bytes_written++; + *(serializer->out_cppstr) += "."; + return; + } +#endif + if( serializer->mode==AE_SM_TO_STRING ) + { + ae_assert(serializer->bytes_written+1bytes_asked, "ae_serializer: integrity check failed", state); /* strict "less" because we need space for trailing zero */ + serializer->bytes_written++; + strcat(serializer->out_str, "."); + serializer->out_str += 1; + return; + } + if( serializer->mode==AE_SM_TO_STREAM ) + { + ae_assert(serializer->bytes_written+1bytes_asked, "ae_serializer: integrity check failed", state); /* strict "less" because we need space for trailing zero */ + serializer->bytes_written++; + ae_assert(serializer->stream_writer(".", serializer->stream_aux)==0, "ae_serializer: error writing to stream", state); + return; + } + if( serializer->mode==AE_SM_FROM_STRING ) + { + /* + * because input string may be from pre-3.11 serializer, + * which does not include trailing dot, we do not test + * string for presence of "." symbol. Anyway, because string + * is not stream, we do not have to read ALL trailing symbols. + */ + return; + } + if( serializer->mode==AE_SM_FROM_STREAM ) + { + /* + * Read trailing dot, perform integrity check + */ + char buf[2]; + ae_assert(serializer->stream_reader(serializer->stream_aux, 1, buf)==0, "ae_serializer: error reading from stream", state); + ae_assert(buf[0]=='.', "ae_serializer: trailing . 
is not found in the stream", state); + return; + } + ae_break(state, ERR_ASSERTION_FAILED, "ae_serializer: integrity check failed"); } @@ -4890,20 +6149,34 @@ /************************************************************************ RComm functions ************************************************************************/ -void _rcommstate_init(rcommstate* p, ae_state *_state) -{ - ae_vector_init(&p->ba, 0, DT_BOOL, _state); - ae_vector_init(&p->ia, 0, DT_INT, _state); - ae_vector_init(&p->ra, 0, DT_REAL, _state); - ae_vector_init(&p->ca, 0, DT_COMPLEX, _state); -} - -void _rcommstate_init_copy(rcommstate* dst, rcommstate* src, ae_state *_state) +void _rcommstate_init(rcommstate* p, ae_state *_state, ae_bool make_automatic) { - ae_vector_init_copy(&dst->ba, &src->ba, _state); - ae_vector_init_copy(&dst->ia, &src->ia, _state); - ae_vector_init_copy(&dst->ra, &src->ra, _state); - ae_vector_init_copy(&dst->ca, &src->ca, _state); + /* initial zero-filling */ + memset(&p->ba, 0, sizeof(p->ba)); + memset(&p->ia, 0, sizeof(p->ia)); + memset(&p->ra, 0, sizeof(p->ra)); + memset(&p->ca, 0, sizeof(p->ca)); + + /* initialization */ + ae_vector_init(&p->ba, 0, DT_BOOL, _state, make_automatic); + ae_vector_init(&p->ia, 0, DT_INT, _state, make_automatic); + ae_vector_init(&p->ra, 0, DT_REAL, _state, make_automatic); + ae_vector_init(&p->ca, 0, DT_COMPLEX, _state, make_automatic); +} + +void _rcommstate_init_copy(rcommstate* dst, rcommstate* src, ae_state *_state, ae_bool make_automatic) +{ + /* initial zero-filling */ + memset(&dst->ba, 0, sizeof(dst->ba)); + memset(&dst->ia, 0, sizeof(dst->ia)); + memset(&dst->ra, 0, sizeof(dst->ra)); + memset(&dst->ca, 0, sizeof(dst->ca)); + + /* initialization */ + ae_vector_init_copy(&dst->ba, &src->ba, _state, make_automatic); + ae_vector_init_copy(&dst->ia, &src->ia, _state, make_automatic); + ae_vector_init_copy(&dst->ra, &src->ra, _state, make_automatic); + ae_vector_init_copy(&dst->ca, &src->ca, _state, make_automatic); dst->stage = src->stage; } @@ -4980,7 +6253,7 @@ } /******************************************************************** -Global and local constants +Global and local constants/variables ********************************************************************/ const double alglib::machineepsilon = 5E-16; const double alglib::maxrealnumber = 1E300; @@ -4989,11 +6262,22 @@ const double alglib::fp_nan = alglib::get_aenv_nan(); const double alglib::fp_posinf = alglib::get_aenv_posinf(); const double alglib::fp_neginf = alglib::get_aenv_neginf(); +#if defined(AE_NO_EXCEPTIONS) +static const char *_alglib_last_error = NULL; +#endif +static const alglib_impl::ae_uint64_t _i64_xdefault = 0x0; +static const alglib_impl::ae_uint64_t _i64_xserial = _ALGLIB_FLG_THREADING_SERIAL; +static const alglib_impl::ae_uint64_t _i64_xparallel = _ALGLIB_FLG_THREADING_PARALLEL; +const alglib::xparams &alglib::xdefault = *((const alglib::xparams *)(&_i64_xdefault)); +const alglib::xparams &alglib::serial = *((const alglib::xparams *)(&_i64_xserial)); +const alglib::xparams &alglib::parallel = *((const alglib::xparams *)(&_i64_xparallel)); + /******************************************************************** -ap_error +Exception handling ********************************************************************/ +#if !defined(AE_NO_EXCEPTIONS) alglib::ap_error::ap_error() { } @@ -5006,15 +6290,36 @@ void alglib::ap_error::make_assertion(bool bClause) { if(!bClause) - throw ap_error(); + _ALGLIB_CPP_EXCEPTION(""); +} + +void alglib::ap_error::make_assertion(bool bClause, const char 
*p_msg) +{ + if(!bClause) + _ALGLIB_CPP_EXCEPTION(p_msg); +} +#else +void alglib::set_error_flag(const char *s) +{ + if( s==NULL ) + s = "ALGLIB: unknown error"; + _alglib_last_error = s; +} + +bool alglib::get_error_flag(const char **p_msg) +{ + if( _alglib_last_error==NULL ) + return false; + if( p_msg!=NULL ) + *p_msg = _alglib_last_error; + return true; } -void alglib::ap_error::make_assertion(bool bClause, const char *p_msg) -{ - if(!bClause) - throw ap_error(p_msg); +void alglib::clear_error_flag() +{ + _alglib_last_error = NULL; } - +#endif /******************************************************************** Complex number with double precision. @@ -5130,6 +6435,7 @@ return (const alglib_impl::ae_complex*)this; } +#if !defined(AE_NO_EXCEPTIONS) std::string alglib::complex::tostring(int _dps) const { char mask[32]; @@ -5138,7 +6444,7 @@ char buf_zero[32]; int dps = _dps>=0 ? _dps : -_dps; if( dps<=0 || dps>=20 ) - throw ap_error("complex::tostring(): incorrect dps"); + _ALGLIB_CPP_EXCEPTION("complex::tostring(): incorrect dps"); // handle IEEE special quantities if( fp_isnan(x) || fp_isnan(y) ) @@ -5148,15 +6454,15 @@ // generate mask if( sprintf(mask, "%%.%d%s", dps, _dps>=0 ? "f" : "e")>=(int)sizeof(mask) ) - throw ap_error("complex::tostring(): buffer overflow"); + _ALGLIB_CPP_EXCEPTION("complex::tostring(): buffer overflow"); // print |x|, |y| and zero with same mask and compare if( sprintf(buf_x, mask, (double)(fabs(x)))>=(int)sizeof(buf_x) ) - throw ap_error("complex::tostring(): buffer overflow"); + _ALGLIB_CPP_EXCEPTION("complex::tostring(): buffer overflow"); if( sprintf(buf_y, mask, (double)(fabs(y)))>=(int)sizeof(buf_y) ) - throw ap_error("complex::tostring(): buffer overflow"); + _ALGLIB_CPP_EXCEPTION("complex::tostring(): buffer overflow"); if( sprintf(buf_zero, mask, (double)0)>=(int)sizeof(buf_zero) ) - throw ap_error("complex::tostring(): buffer overflow"); + _ALGLIB_CPP_EXCEPTION("complex::tostring(): buffer overflow"); // different zero/nonzero patterns if( strcmp(buf_x,buf_zero)!=0 && strcmp(buf_y,buf_zero)!=0 ) @@ -5167,8 +6473,9 @@ return std::string(y>0 ? 
"" : "-")+buf_y+"i"; return std::string("0"); } +#endif -const bool alglib::operator==(const alglib::complex& lhs, const alglib::complex& rhs) +bool alglib::operator==(const alglib::complex& lhs, const alglib::complex& rhs) { volatile double x1 = lhs.x; volatile double x2 = rhs.x; @@ -5177,7 +6484,7 @@ return x1==x2 && y1==y2; } -const bool alglib::operator!=(const alglib::complex& lhs, const alglib::complex& rhs) +bool alglib::operator!=(const alglib::complex& lhs, const alglib::complex& rhs) { return !(lhs==rhs); } const alglib::complex alglib::operator+(const alglib::complex& lhs) @@ -5293,6 +6600,48 @@ #endif } +void alglib::setglobalthreading(const alglib::xparams settings) +{ +#ifdef AE_HPC + alglib_impl::ae_set_global_threading(settings.flags); +#endif +} + +alglib::ae_int_t alglib::getnworkers() +{ +#ifdef AE_HPC + return alglib_impl::ae_get_cores_to_use(); +#else + return 1; +#endif +} + +alglib::ae_int_t alglib::_ae_cores_count() +{ +#ifdef AE_HPC + return alglib_impl::ae_cores_count(); +#else + return 1; +#endif +} + +void alglib::_ae_set_global_threading(alglib_impl::ae_uint64_t flg_value) +{ +#ifdef AE_HPC + alglib_impl::ae_set_global_threading(flg_value); +#endif +} + +alglib_impl::ae_uint64_t alglib::_ae_get_global_threading() +{ +#ifdef AE_HPC + return alglib_impl::ae_get_global_threading(); +#else + return _ALGLIB_FLG_THREADING_SERIAL; +#endif +} + + /******************************************************************** Level 1 BLAS functions ********************************************************************/ @@ -6149,98 +7498,205 @@ vmul(vdst, 1, N, alpha); } +alglib::ae_int_t alglib::vlen(ae_int_t n1, ae_int_t n2) +{ + return n2-n1+1; +} + /******************************************************************** Matrices and vectors ********************************************************************/ -alglib::ae_vector_wrapper::ae_vector_wrapper() +alglib::ae_vector_wrapper::ae_vector_wrapper(alglib_impl::ae_vector *e_ptr, alglib_impl::ae_datatype datatype) +{ + if( e_ptr==NULL || e_ptr->datatype!=datatype ) + { + const char *msg = "ALGLIB: ae_vector_wrapper datatype check failed"; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(msg); +#else + ptr = NULL; + is_frozen_proxy = false; + _ALGLIB_SET_ERROR_FLAG(msg); + return; +#endif + } + ptr = e_ptr; + is_frozen_proxy = true; +} + +alglib::ae_vector_wrapper::ae_vector_wrapper(alglib_impl::ae_datatype datatype) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + ptr = NULL; + is_frozen_proxy = false; + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + ptr = &inner_vec; + is_frozen_proxy = false; + memset(ptr, 0, sizeof(*ptr)); + ae_vector_init(ptr, 0, datatype, &_state, ae_false); + ae_state_clear(&_state); +} + +alglib::ae_vector_wrapper::ae_vector_wrapper(const ae_vector_wrapper &rhs, alglib_impl::ae_datatype datatype) { - p_vec = NULL; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + ptr = NULL; + is_frozen_proxy = false; + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(rhs.ptr!=NULL, "ALGLIB: ae_vector_wrapper 
source is not initialized", &_state); + alglib_impl::ae_assert(rhs.ptr->datatype==datatype, "ALGLIB: ae_vector_wrapper datatype check failed", &_state); + ptr = &inner_vec; + is_frozen_proxy = false; + memset(ptr, 0, sizeof(*ptr)); + ae_vector_init_copy(ptr, rhs.ptr, &_state, ae_false); + ae_state_clear(&_state); } alglib::ae_vector_wrapper::~ae_vector_wrapper() { - if( p_vec==&vec ) - ae_vector_clear(p_vec); + if( ptr==&inner_vec ) + ae_vector_clear(ptr); } void alglib::ae_vector_wrapper::setlength(ae_int_t iLen) -{ - if( p_vec==NULL ) - throw alglib::ap_error("ALGLIB: setlength() error, p_vec==NULL (array was not correctly initialized)"); - if( p_vec!=&vec ) - throw alglib::ap_error("ALGLIB: setlength() error, p_vec!=&vec (attempt to resize frozen array)"); - if( !ae_vector_set_length(p_vec, iLen, NULL) ) - throw alglib::ap_error("ALGLIB: malloc error"); +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(ptr!=NULL, "ALGLIB: setlength() error, ptr==NULL (array was not correctly initialized)", &_state); + alglib_impl::ae_assert(!is_frozen_proxy, "ALGLIB: setlength() error, ptr is frozen proxy array", &_state); + alglib_impl::ae_vector_set_length(ptr, iLen, &_state); + alglib_impl::ae_state_clear(&_state); } alglib::ae_int_t alglib::ae_vector_wrapper::length() const { - if( p_vec==NULL ) + if( ptr==NULL ) return 0; - return p_vec->cnt; + return ptr->cnt; } -void alglib::ae_vector_wrapper::attach_to(alglib_impl::ae_vector *ptr) +void alglib::ae_vector_wrapper::attach_to(alglib_impl::x_vector *new_ptr, alglib_impl::ae_state *_state) { - if( ptr==&vec ) - throw alglib::ap_error("ALGLIB: attempt to attach vector to itself"); - if( p_vec==&vec ) - ae_vector_clear(p_vec); - p_vec = ptr; + if( ptr==&inner_vec ) + ae_vector_clear(ptr); + ptr = &inner_vec; + memset(ptr, 0, sizeof(*ptr)); + ae_vector_init_attach_to_x(ptr, new_ptr, _state, ae_false); + is_frozen_proxy = true; } -void alglib::ae_vector_wrapper::allocate_own(ae_int_t size, alglib_impl::ae_datatype datatype) +const alglib::ae_vector_wrapper& alglib::ae_vector_wrapper::assign(const alglib::ae_vector_wrapper &rhs) { - if( p_vec==&vec ) - ae_vector_clear(p_vec); - p_vec = &vec; - ae_vector_init(p_vec, size, datatype, NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + if( this==&rhs ) + return *this; + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + ae_assert(ptr!=NULL, "ALGLIB: incorrect assignment (uninitialized destination)", &_state); + ae_assert(rhs.ptr!=NULL, "ALGLIB: incorrect assignment (uninitialized source)", &_state); + ae_assert(rhs.ptr->datatype==ptr->datatype, "ALGLIB: incorrect assignment to array (types do not match)", &_state); + if( is_frozen_proxy ) + ae_assert(rhs.ptr->cnt==ptr->cnt, "ALGLIB: incorrect assignment to proxy array (sizes do not match)", &_state); + if( rhs.ptr->cnt!=ptr->cnt ) + ae_vector_set_length(ptr, rhs.ptr->cnt, &_state); + memcpy(ptr->ptr.p_ptr, rhs.ptr->ptr.p_ptr, ptr->cnt*alglib_impl::ae_sizeof(ptr->datatype)); + 
alglib_impl::ae_state_clear(&_state); + return *this; } const alglib_impl::ae_vector* alglib::ae_vector_wrapper::c_ptr() const { - return p_vec; + return ptr; } alglib_impl::ae_vector* alglib::ae_vector_wrapper::c_ptr() { - return p_vec; -} - -void alglib::ae_vector_wrapper::create(const alglib::ae_vector_wrapper &rhs) -{ - if( rhs.p_vec!=NULL ) - { - p_vec = &vec; - ae_vector_init_copy(p_vec, rhs.p_vec, NULL); - } - else - p_vec = NULL; + return ptr; } -void alglib::ae_vector_wrapper::create(const char *s, alglib_impl::ae_datatype datatype) +#if !defined(AE_NO_EXCEPTIONS) +alglib::ae_vector_wrapper::ae_vector_wrapper(const char *s, alglib_impl::ae_datatype datatype) { std::vector svec; size_t i; char *p = filter_spaces(s); + if( p==NULL ) + _ALGLIB_CPP_EXCEPTION("ALGLIB: allocation error"); try { str_vector_create(p, true, &svec); - allocate_own((ae_int_t)(svec.size()), datatype); + { + jmp_buf _break_jump; + alglib_impl::ae_state _state; + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + ptr = &inner_vec; + is_frozen_proxy = false; + memset(ptr, 0, sizeof(*ptr)); + ae_vector_init(ptr, (ae_int_t)(svec.size()), datatype, &_state, ae_false); + ae_state_clear(&_state); + } for(i=0; iptr.p_bool[i] = parse_bool_delim(svec[i],",]"); + ptr->ptr.p_bool[i] = parse_bool_delim(svec[i],",]"); if( datatype==alglib_impl::DT_INT ) - p_vec->ptr.p_int[i] = parse_int_delim(svec[i],",]"); + ptr->ptr.p_int[i] = parse_int_delim(svec[i],",]"); if( datatype==alglib_impl::DT_REAL ) - p_vec->ptr.p_double[i] = parse_real_delim(svec[i],",]"); + ptr->ptr.p_double[i] = parse_real_delim(svec[i],",]"); if( datatype==alglib_impl::DT_COMPLEX ) { alglib::complex t = parse_complex_delim(svec[i],",]"); - p_vec->ptr.p_complex[i].x = t.x; - p_vec->ptr.p_complex[i].y = t.y; + ptr->ptr.p_complex[i].x = t.x; + ptr->ptr.p_complex[i].y = t.y; } } alglib_impl::ae_free(p); @@ -6251,65 +7707,23 @@ throw; } } - -void alglib::ae_vector_wrapper::assign(const alglib::ae_vector_wrapper &rhs) -{ - if( this==&rhs ) - return; - if( p_vec==&vec || p_vec==NULL ) - { - // - // Assignment to non-proxy object - // - ae_vector_clear(p_vec); - if( rhs.p_vec!=NULL ) - { - p_vec = &vec; - ae_vector_init_copy(p_vec, rhs.p_vec, NULL); - } - else - p_vec = NULL; - } - else - { - // - // Assignment to proxy object - // - if( rhs.p_vec==NULL ) - throw alglib::ap_error("ALGLIB: incorrect assignment to array (sizes dont match)"); - if( rhs.p_vec->datatype!=p_vec->datatype ) - throw alglib::ap_error("ALGLIB: incorrect assignment to array (types dont match)"); - if( rhs.p_vec->cnt!=p_vec->cnt ) - throw alglib::ap_error("ALGLIB: incorrect assignment to array (sizes dont match)"); - memcpy(p_vec->ptr.p_ptr, rhs.p_vec->ptr.p_ptr, p_vec->cnt*alglib_impl::ae_sizeof(p_vec->datatype)); - } -} +#endif -alglib::boolean_1d_array::boolean_1d_array() +alglib::boolean_1d_array::boolean_1d_array():ae_vector_wrapper(alglib_impl::DT_BOOL) { - allocate_own(0, alglib_impl::DT_BOOL); } -alglib::boolean_1d_array::boolean_1d_array(const char *s) +alglib::boolean_1d_array::boolean_1d_array(const alglib::boolean_1d_array &rhs):ae_vector_wrapper(rhs,alglib_impl::DT_BOOL) { - create(s, alglib_impl::DT_BOOL); } -alglib::boolean_1d_array::boolean_1d_array(const alglib::boolean_1d_array &rhs) +alglib::boolean_1d_array::boolean_1d_array(alglib_impl::ae_vector *p):ae_vector_wrapper(p,alglib_impl::DT_BOOL) { - create(rhs); -} - 
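/*
A brief user-level sketch of the refactored 1-d array wrappers, which now
delegate construction to the ae_vector_wrapper base class (names taken from
this file; values illustrative, exception-enabled build assumed):

    alglib::boolean_1d_array flags;
    flags.setlength(2);
    flags[0] = true;
    flags[1] = false;
    alglib::boolean_1d_array copy_of_flags = flags;
*/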
-alglib::boolean_1d_array::boolean_1d_array(alglib_impl::ae_vector *p) -{ - p_vec = NULL; - attach_to(p); } const alglib::boolean_1d_array& alglib::boolean_1d_array::operator=(const alglib::boolean_1d_array &rhs) { - assign(rhs); - return *this; + return static_cast(assign(rhs)); } alglib::boolean_1d_array::~boolean_1d_array() @@ -6318,40 +7732,51 @@ const ae_bool& alglib::boolean_1d_array::operator()(ae_int_t i) const { - return p_vec->ptr.p_bool[i]; + return ptr->ptr.p_bool[i]; } ae_bool& alglib::boolean_1d_array::operator()(ae_int_t i) { - return p_vec->ptr.p_bool[i]; + return ptr->ptr.p_bool[i]; } const ae_bool& alglib::boolean_1d_array::operator[](ae_int_t i) const { - return p_vec->ptr.p_bool[i]; + return ptr->ptr.p_bool[i]; } ae_bool& alglib::boolean_1d_array::operator[](ae_int_t i) { - return p_vec->ptr.p_bool[i]; + return ptr->ptr.p_bool[i]; } void alglib::boolean_1d_array::setcontent(ae_int_t iLen, const bool *pContent ) { ae_int_t i; + + // setlength, with exception-free error handling fallback code setlength(iLen); + if( ptr==NULL || ptr->cnt!=iLen ) + return; + + // copy for(i=0; iptr.p_bool[i] = pContent[i]; + ptr->ptr.p_bool[i] = pContent[i]; } ae_bool* alglib::boolean_1d_array::getcontent() { - return p_vec->ptr.p_bool; + return ptr->ptr.p_bool; } const ae_bool* alglib::boolean_1d_array::getcontent() const { - return p_vec->ptr.p_bool; + return ptr->ptr.p_bool; +} + +#if !defined(AE_NO_EXCEPTIONS) +alglib::boolean_1d_array::boolean_1d_array(const char *s):ae_vector_wrapper(s, alglib_impl::DT_BOOL) +{ } std::string alglib::boolean_1d_array::tostring() const @@ -6360,32 +7785,23 @@ return "[]"; return arraytostring(&(operator()(0)), length()); } +#endif -alglib::integer_1d_array::integer_1d_array() -{ - allocate_own(0, alglib_impl::DT_INT); -} - -alglib::integer_1d_array::integer_1d_array(alglib_impl::ae_vector *p) +alglib::integer_1d_array::integer_1d_array():ae_vector_wrapper(alglib_impl::DT_INT) { - p_vec = NULL; - attach_to(p); } -alglib::integer_1d_array::integer_1d_array(const char *s) +alglib::integer_1d_array::integer_1d_array(alglib_impl::ae_vector *p):ae_vector_wrapper(p,alglib_impl::DT_INT) { - create(s, alglib_impl::DT_INT); } -alglib::integer_1d_array::integer_1d_array(const alglib::integer_1d_array &rhs) +alglib::integer_1d_array::integer_1d_array(const alglib::integer_1d_array &rhs):ae_vector_wrapper(rhs,alglib_impl::DT_INT) { - create(rhs); } const alglib::integer_1d_array& alglib::integer_1d_array::operator=(const alglib::integer_1d_array &rhs) { - assign(rhs); - return *this; + return static_cast(assign(rhs)); } alglib::integer_1d_array::~integer_1d_array() @@ -6394,40 +7810,51 @@ const alglib::ae_int_t& alglib::integer_1d_array::operator()(ae_int_t i) const { - return p_vec->ptr.p_int[i]; + return ptr->ptr.p_int[i]; } alglib::ae_int_t& alglib::integer_1d_array::operator()(ae_int_t i) { - return p_vec->ptr.p_int[i]; + return ptr->ptr.p_int[i]; } const alglib::ae_int_t& alglib::integer_1d_array::operator[](ae_int_t i) const { - return p_vec->ptr.p_int[i]; + return ptr->ptr.p_int[i]; } alglib::ae_int_t& alglib::integer_1d_array::operator[](ae_int_t i) { - return p_vec->ptr.p_int[i]; + return ptr->ptr.p_int[i]; } void alglib::integer_1d_array::setcontent(ae_int_t iLen, const ae_int_t *pContent ) { ae_int_t i; + + // setlength(), handle possible exception-free errors setlength(iLen); + if( ptr==NULL || ptr->cnt!=iLen ) + return; + + // copy for(i=0; iptr.p_int[i] = pContent[i]; + ptr->ptr.p_int[i] = pContent[i]; } alglib::ae_int_t* 
alglib::integer_1d_array::getcontent() { - return p_vec->ptr.p_int; + return ptr->ptr.p_int; } const alglib::ae_int_t* alglib::integer_1d_array::getcontent() const { - return p_vec->ptr.p_int; + return ptr->ptr.p_int; +} + +#if !defined(AE_NO_EXCEPTIONS) +alglib::integer_1d_array::integer_1d_array(const char *s):ae_vector_wrapper(s, alglib_impl::DT_INT) +{ } std::string alglib::integer_1d_array::tostring() const @@ -6436,32 +7863,23 @@ return "[]"; return arraytostring(&operator()(0), length()); } +#endif -alglib::real_1d_array::real_1d_array() -{ - allocate_own(0, alglib_impl::DT_REAL); -} - -alglib::real_1d_array::real_1d_array(alglib_impl::ae_vector *p) +alglib::real_1d_array::real_1d_array():ae_vector_wrapper(alglib_impl::DT_REAL) { - p_vec = NULL; - attach_to(p); } -alglib::real_1d_array::real_1d_array(const char *s) +alglib::real_1d_array::real_1d_array(alglib_impl::ae_vector *p):ae_vector_wrapper(p,alglib_impl::DT_REAL) { - create(s, alglib_impl::DT_REAL); } -alglib::real_1d_array::real_1d_array(const alglib::real_1d_array &rhs) +alglib::real_1d_array::real_1d_array(const alglib::real_1d_array &rhs):ae_vector_wrapper(rhs,alglib_impl::DT_REAL) { - create(rhs); } const alglib::real_1d_array& alglib::real_1d_array::operator=(const alglib::real_1d_array &rhs) { - assign(rhs); - return *this; + return static_cast(assign(rhs)); } alglib::real_1d_array::~real_1d_array() @@ -6470,40 +7888,81 @@ const double& alglib::real_1d_array::operator()(ae_int_t i) const { - return p_vec->ptr.p_double[i]; + return ptr->ptr.p_double[i]; } double& alglib::real_1d_array::operator()(ae_int_t i) { - return p_vec->ptr.p_double[i]; + return ptr->ptr.p_double[i]; } const double& alglib::real_1d_array::operator[](ae_int_t i) const { - return p_vec->ptr.p_double[i]; + return ptr->ptr.p_double[i]; } double& alglib::real_1d_array::operator[](ae_int_t i) { - return p_vec->ptr.p_double[i]; + return ptr->ptr.p_double[i]; } void alglib::real_1d_array::setcontent(ae_int_t iLen, const double *pContent ) { ae_int_t i; + + // setlength(), handle possible exception-free errors setlength(iLen); + if( ptr==NULL || ptr->cnt!=iLen ) + return; + + // copy for(i=0; iptr.p_double[i] = pContent[i]; + ptr->ptr.p_double[i] = pContent[i]; +} + +void alglib::real_1d_array::attach_to_ptr(ae_int_t iLen, double *pContent ) // TODO: convert to constructor!!!!!!! 
+{ + alglib_impl::x_vector x; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + ptr = NULL; + is_frozen_proxy = false; + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(!is_frozen_proxy, "ALGLIB: unable to attach proxy object to something else", &_state); + alglib_impl::ae_assert(iLen>0, "ALGLIB: non-positive length for attach_to_ptr()", &_state); + x.cnt = iLen; + x.datatype = alglib_impl::DT_REAL; + x.owner = alglib_impl::OWN_CALLER; + x.last_action = alglib_impl::ACT_UNCHANGED; + x.x_ptr.p_ptr = pContent; + attach_to(&x, &_state); + ae_state_clear(&_state); } double* alglib::real_1d_array::getcontent() { - return p_vec->ptr.p_double; + return ptr->ptr.p_double; } const double* alglib::real_1d_array::getcontent() const { - return p_vec->ptr.p_double; + return ptr->ptr.p_double; +} + +#if !defined(AE_NO_EXCEPTIONS) +alglib::real_1d_array::real_1d_array(const char *s):ae_vector_wrapper(s, alglib_impl::DT_REAL) +{ } std::string alglib::real_1d_array::tostring(int dps) const @@ -6512,32 +7971,23 @@ return "[]"; return arraytostring(&operator()(0), length(), dps); } +#endif -alglib::complex_1d_array::complex_1d_array() -{ - allocate_own(0, alglib_impl::DT_COMPLEX); -} - -alglib::complex_1d_array::complex_1d_array(alglib_impl::ae_vector *p) +alglib::complex_1d_array::complex_1d_array():ae_vector_wrapper(alglib_impl::DT_COMPLEX) { - p_vec = NULL; - attach_to(p); } -alglib::complex_1d_array::complex_1d_array(const char *s) +alglib::complex_1d_array::complex_1d_array(alglib_impl::ae_vector *p):ae_vector_wrapper(p,alglib_impl::DT_COMPLEX) { - create(s, alglib_impl::DT_COMPLEX); } -alglib::complex_1d_array::complex_1d_array(const alglib::complex_1d_array &rhs) +alglib::complex_1d_array::complex_1d_array(const alglib::complex_1d_array &rhs):ae_vector_wrapper(rhs,alglib_impl::DT_COMPLEX) { - create(rhs); } const alglib::complex_1d_array& alglib::complex_1d_array::operator=(const alglib::complex_1d_array &rhs) { - assign(rhs); - return *this; + return static_cast(assign(rhs)); } alglib::complex_1d_array::~complex_1d_array() @@ -6546,43 +7996,54 @@ const alglib::complex& alglib::complex_1d_array::operator()(ae_int_t i) const { - return *((const alglib::complex*)(p_vec->ptr.p_complex+i)); + return *((const alglib::complex*)(ptr->ptr.p_complex+i)); } alglib::complex& alglib::complex_1d_array::operator()(ae_int_t i) { - return *((alglib::complex*)(p_vec->ptr.p_complex+i)); + return *((alglib::complex*)(ptr->ptr.p_complex+i)); } const alglib::complex& alglib::complex_1d_array::operator[](ae_int_t i) const { - return *((const alglib::complex*)(p_vec->ptr.p_complex+i)); + return *((const alglib::complex*)(ptr->ptr.p_complex+i)); } alglib::complex& alglib::complex_1d_array::operator[](ae_int_t i) { - return *((alglib::complex*)(p_vec->ptr.p_complex+i)); + return *((alglib::complex*)(ptr->ptr.p_complex+i)); } void alglib::complex_1d_array::setcontent(ae_int_t iLen, const alglib::complex *pContent ) { ae_int_t i; + + // setlength(), handle possible exception-free errors setlength(iLen); + if( ptr==NULL || ptr->cnt!=iLen ) + return; + + // copy for(i=0; iptr.p_complex[i].x = pContent[i].x; - p_vec->ptr.p_complex[i].y = pContent[i].y; + ptr->ptr.p_complex[i].x = pContent[i].x; + ptr->ptr.p_complex[i].y = pContent[i].y; } } alglib::complex* 
alglib::complex_1d_array::getcontent() { - return (alglib::complex*)p_vec->ptr.p_complex; + return (alglib::complex*)ptr->ptr.p_complex; } const alglib::complex* alglib::complex_1d_array::getcontent() const { - return (const alglib::complex*)p_vec->ptr.p_complex; + return (const alglib::complex*)ptr->ptr.p_complex; +} + +#if !defined(AE_NO_EXCEPTIONS) +alglib::complex_1d_array::complex_1d_array(const char *s):ae_vector_wrapper(s, alglib_impl::DT_COMPLEX) +{ } std::string alglib::complex_1d_array::tostring(int dps) const @@ -6591,65 +8052,131 @@ return "[]"; return arraytostring(&operator()(0), length(), dps); } +#endif -alglib::ae_matrix_wrapper::ae_matrix_wrapper() +alglib::ae_matrix_wrapper::ae_matrix_wrapper(alglib_impl::ae_matrix *e_ptr, alglib_impl::ae_datatype datatype) { - p_mat = NULL; + if( e_ptr->datatype!=datatype ) + { + const char *msg = "ALGLIB: ae_vector_wrapper datatype check failed"; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(msg); +#else + ptr = NULL; + is_frozen_proxy = false; + _ALGLIB_SET_ERROR_FLAG(msg); + return; +#endif + } + ptr = e_ptr; + is_frozen_proxy = true; } -alglib::ae_matrix_wrapper::~ae_matrix_wrapper() +alglib::ae_matrix_wrapper::ae_matrix_wrapper(alglib_impl::ae_datatype datatype) { - if( p_mat==&mat ) - ae_matrix_clear(p_mat); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + ptr = NULL; + is_frozen_proxy = false; + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + ptr = &inner_mat; + is_frozen_proxy = false; + memset(ptr, 0, sizeof(*ptr)); + ae_matrix_init(ptr, 0, 0, datatype, &_state, ae_false); + ae_state_clear(&_state); + } -const alglib::ae_matrix_wrapper& alglib::ae_matrix_wrapper::operator=(const alglib::ae_matrix_wrapper &rhs) +alglib::ae_matrix_wrapper::ae_matrix_wrapper(const ae_matrix_wrapper &rhs, alglib_impl::ae_datatype datatype) { - assign(rhs); - return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + ptr = NULL; + is_frozen_proxy = false; + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + is_frozen_proxy = false; + ptr = NULL; + alglib_impl::ae_assert(rhs.ptr->datatype==datatype, "ALGLIB: ae_matrix_wrapper datatype check failed", &_state); + if( rhs.ptr!=NULL ) + { + ptr = &inner_mat; + memset(ptr, 0, sizeof(*ptr)); + ae_matrix_init_copy(ptr, rhs.ptr, &_state, ae_false); + } + ae_state_clear(&_state); } -void alglib::ae_matrix_wrapper::create(const ae_matrix_wrapper &rhs) +alglib::ae_matrix_wrapper::~ae_matrix_wrapper() { - if( rhs.p_mat!=NULL ) - { - p_mat = &mat; - ae_matrix_init_copy(p_mat, rhs.p_mat, NULL); - } - else - p_mat = NULL; + if( ptr==&inner_mat ) + ae_matrix_clear(ptr); } -void alglib::ae_matrix_wrapper::create(const char *s, alglib_impl::ae_datatype datatype) +#if !defined(AE_NO_EXCEPTIONS) +alglib::ae_matrix_wrapper::ae_matrix_wrapper(const char *s, alglib_impl::ae_datatype datatype) { std::vector< std::vector > smat; size_t i, j; char *p = filter_spaces(s); + if( p==NULL ) + _ALGLIB_CPP_EXCEPTION("ALGLIB: allocation error"); try { str_matrix_create(p, &smat); - if( smat.size()!=0 ) { - allocate_own((ae_int_t)(smat.size()), 
(ae_int_t)(smat[0].size()), datatype); - for(i=0; iptr.pp_bool[i][j] = parse_bool_delim(smat[i][j],",]"); + if( datatype==alglib_impl::DT_INT ) + ptr->ptr.pp_int[i][j] = parse_int_delim(smat[i][j],",]"); + if( datatype==alglib_impl::DT_REAL ) + ptr->ptr.pp_double[i][j] = parse_real_delim(smat[i][j],",]"); + if( datatype==alglib_impl::DT_COMPLEX ) { - if( datatype==alglib_impl::DT_BOOL ) - p_mat->ptr.pp_bool[i][j] = parse_bool_delim(smat[i][j],",]"); - if( datatype==alglib_impl::DT_INT ) - p_mat->ptr.pp_int[i][j] = parse_int_delim(smat[i][j],",]"); - if( datatype==alglib_impl::DT_REAL ) - p_mat->ptr.pp_double[i][j] = parse_real_delim(smat[i][j],",]"); - if( datatype==alglib_impl::DT_COMPLEX ) - { - alglib::complex t = parse_complex_delim(smat[i][j],",]"); - p_mat->ptr.pp_complex[i][j].x = t.x; - p_mat->ptr.pp_complex[i][j].y = t.y; - } + alglib::complex t = parse_complex_delim(smat[i][j],",]"); + ptr->ptr.pp_complex[i][j].x = t.x; + ptr->ptr.pp_complex[i][j].y = t.y; } - } - else - allocate_own(0, 0, datatype); + } alglib_impl::ae_free(p); } catch(...) @@ -6658,66 +8185,41 @@ throw; } } - -void alglib::ae_matrix_wrapper::assign(const alglib::ae_matrix_wrapper &rhs) -{ - if( this==&rhs ) - return; - if( p_mat==&mat || p_mat==NULL ) - { - // - // Assignment to non-proxy object - // - ae_matrix_clear(p_mat); - if( rhs.p_mat!=NULL ) - { - p_mat = &mat; - ae_matrix_init_copy(p_mat, rhs.p_mat, NULL); - } - else - p_mat = NULL; - } - else +#endif + +void alglib::ae_matrix_wrapper::setlength(ae_int_t rows, ae_int_t cols) // TODO: automatic allocation of NULL ptr!!!!! +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - // - // Assignment to proxy object - // - ae_int_t i; - if( rhs.p_mat==NULL ) - throw alglib::ap_error("ALGLIB: incorrect assignment to array (sizes dont match)"); - if( rhs.p_mat->datatype!=p_mat->datatype ) - throw alglib::ap_error("ALGLIB: incorrect assignment to array (types dont match)"); - if( rhs.p_mat->rows!=p_mat->rows ) - throw alglib::ap_error("ALGLIB: incorrect assignment to array (sizes dont match)"); - if( rhs.p_mat->cols!=p_mat->cols ) - throw alglib::ap_error("ALGLIB: incorrect assignment to array (sizes dont match)"); - for(i=0; irows; i++) - memcpy(p_mat->ptr.pp_void[i], rhs.p_mat->ptr.pp_void[i], p_mat->cols*alglib_impl::ae_sizeof(p_mat->datatype)); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } -} - -void alglib::ae_matrix_wrapper::setlength(ae_int_t rows, ae_int_t cols) -{ - if( p_mat==NULL ) - throw alglib::ap_error("ALGLIB: setlength() error, p_mat==NULL (array was not correctly initialized)"); - if( p_mat!=&mat ) - throw alglib::ap_error("ALGLIB: setlength() error, p_mat!=&mat (attempt to resize frozen array)"); - if( !ae_matrix_set_length(p_mat, rows, cols, NULL) ) - throw alglib::ap_error("ALGLIB: malloc error"); + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(ptr!=NULL, "ALGLIB: setlength() error, p_mat==NULL (array was not correctly initialized)", &_state); + alglib_impl::ae_assert(!is_frozen_proxy, "ALGLIB: setlength() error, attempt to resize proxy array", &_state); + alglib_impl::ae_matrix_set_length(ptr, rows, cols, &_state); + alglib_impl::ae_state_clear(&_state); } alglib::ae_int_t alglib::ae_matrix_wrapper::rows() const { - if( p_mat==NULL ) + if( ptr==NULL ) return 0; - return p_mat->rows; + return ptr->rows; } alglib::ae_int_t 
alglib::ae_matrix_wrapper::cols() const { - if( p_mat==NULL ) + if( ptr==NULL ) return 0; - return p_mat->cols; + return ptr->cols; } bool alglib::ae_matrix_wrapper::isempty() const @@ -6727,90 +8229,124 @@ alglib::ae_int_t alglib::ae_matrix_wrapper::getstride() const { - if( p_mat==NULL ) + if( ptr==NULL ) return 0; - return p_mat->stride; + return ptr->stride; } -void alglib::ae_matrix_wrapper::attach_to(alglib_impl::ae_matrix *ptr) +void alglib::ae_matrix_wrapper::attach_to(alglib_impl::x_matrix *new_ptr, alglib_impl::ae_state *_state) { - if( ptr==&mat ) - throw alglib::ap_error("ALGLIB: attempt to attach matrix to itself"); - if( p_mat==&mat ) - ae_matrix_clear(p_mat); - p_mat = ptr; + if( ptr==&inner_mat ) + ae_matrix_clear(ptr); + ptr = &inner_mat; + memset(ptr, 0, sizeof(*ptr)); + ae_matrix_init_attach_to_x(ptr, new_ptr, _state, ae_false); + is_frozen_proxy = true; } - -void alglib::ae_matrix_wrapper::allocate_own(ae_int_t rows, ae_int_t cols, alglib_impl::ae_datatype datatype) + +const alglib::ae_matrix_wrapper& alglib::ae_matrix_wrapper::assign(const alglib::ae_matrix_wrapper &rhs) { - if( p_mat==&mat ) - ae_matrix_clear(p_mat); - p_mat = &mat; - ae_matrix_init(p_mat, rows, cols, datatype, NULL); + ae_int_t i; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + if( this==&rhs ) + return *this; + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + ae_assert(ptr!=NULL, "ALGLIB: incorrect assignment to matrix (uninitialized destination)", &_state); + ae_assert(rhs.ptr!=NULL, "ALGLIB: incorrect assignment to array (uninitialized source)", &_state); + ae_assert(rhs.ptr->datatype==ptr->datatype, "ALGLIB: incorrect assignment to array (types dont match)", &_state); + if( is_frozen_proxy ) + { + ae_assert(rhs.ptr->rows==ptr->rows, "ALGLIB: incorrect assignment to proxy array (sizes dont match)", &_state); + ae_assert(rhs.ptr->cols==ptr->cols, "ALGLIB: incorrect assignment to proxy array (sizes dont match)", &_state); + } + if( (rhs.ptr->rows!=ptr->rows) || (rhs.ptr->cols!=ptr->cols) ) + ae_matrix_set_length(ptr, rhs.ptr->rows, rhs.ptr->cols, &_state); + for(i=0; irows; i++) + memcpy(ptr->ptr.pp_void[i], rhs.ptr->ptr.pp_void[i], ptr->cols*alglib_impl::ae_sizeof(ptr->datatype)); + alglib_impl::ae_state_clear(&_state); + return *this; } const alglib_impl::ae_matrix* alglib::ae_matrix_wrapper::c_ptr() const { - return p_mat; + return ptr; } alglib_impl::ae_matrix* alglib::ae_matrix_wrapper::c_ptr() { - return p_mat; + return ptr; } -alglib::boolean_2d_array::boolean_2d_array() +alglib::boolean_2d_array::boolean_2d_array():ae_matrix_wrapper(alglib_impl::DT_BOOL) { - allocate_own(0, 0, alglib_impl::DT_BOOL); } -alglib::boolean_2d_array::boolean_2d_array(const alglib::boolean_2d_array &rhs) +alglib::boolean_2d_array::boolean_2d_array(const alglib::boolean_2d_array &rhs):ae_matrix_wrapper(rhs,alglib_impl::DT_BOOL) { - create(rhs); } -alglib::boolean_2d_array::boolean_2d_array(alglib_impl::ae_matrix *p) +alglib::boolean_2d_array::boolean_2d_array(alglib_impl::ae_matrix *p):ae_matrix_wrapper(p,alglib_impl::DT_BOOL) { - p_mat = NULL; - attach_to(p); } -alglib::boolean_2d_array::boolean_2d_array(const char *s) +alglib::boolean_2d_array::~boolean_2d_array() { - create(s, alglib_impl::DT_BOOL); } -alglib::boolean_2d_array::~boolean_2d_array() +const 
alglib::boolean_2d_array& alglib::boolean_2d_array::operator=(const alglib::boolean_2d_array &rhs) { + return static_cast(assign(rhs)); } const ae_bool& alglib::boolean_2d_array::operator()(ae_int_t i, ae_int_t j) const { - return p_mat->ptr.pp_bool[i][j]; + return ptr->ptr.pp_bool[i][j]; } ae_bool& alglib::boolean_2d_array::operator()(ae_int_t i, ae_int_t j) { - return p_mat->ptr.pp_bool[i][j]; + return ptr->ptr.pp_bool[i][j]; } const ae_bool* alglib::boolean_2d_array::operator[](ae_int_t i) const { - return p_mat->ptr.pp_bool[i]; + return ptr->ptr.pp_bool[i]; } ae_bool* alglib::boolean_2d_array::operator[](ae_int_t i) { - return p_mat->ptr.pp_bool[i]; + return ptr->ptr.pp_bool[i]; } void alglib::boolean_2d_array::setcontent(ae_int_t irows, ae_int_t icols, const bool *pContent ) { ae_int_t i, j; + + // setlength(), handle possible exception-free errors setlength(irows, icols); + if( ptr==NULL || ptr->rows!=irows || ptr->cols!=icols ) + return; + + // copy for(i=0; iptr.pp_bool[i][j] = pContent[i*icols+j]; + ptr->ptr.pp_bool[i][j] = pContent[i*icols+j]; +} + +#if !defined(AE_NO_EXCEPTIONS) +alglib::boolean_2d_array::boolean_2d_array(const char *s):ae_matrix_wrapper(s, alglib_impl::DT_BOOL) +{ } std::string alglib::boolean_2d_array::tostring() const @@ -6829,59 +8365,67 @@ result += "]"; return result; } +#endif -alglib::integer_2d_array::integer_2d_array() +alglib::integer_2d_array::integer_2d_array():ae_matrix_wrapper(alglib_impl::DT_INT) { - allocate_own(0, 0, alglib_impl::DT_INT); } -alglib::integer_2d_array::integer_2d_array(const alglib::integer_2d_array &rhs) +alglib::integer_2d_array::integer_2d_array(const alglib::integer_2d_array &rhs):ae_matrix_wrapper(rhs,alglib_impl::DT_INT) { - create(rhs); } -alglib::integer_2d_array::integer_2d_array(alglib_impl::ae_matrix *p) +alglib::integer_2d_array::integer_2d_array(alglib_impl::ae_matrix *p):ae_matrix_wrapper(p,alglib_impl::DT_INT) { - p_mat = NULL; - attach_to(p); } -alglib::integer_2d_array::integer_2d_array(const char *s) +alglib::integer_2d_array::~integer_2d_array() { - create(s, alglib_impl::DT_INT); } -alglib::integer_2d_array::~integer_2d_array() +const alglib::integer_2d_array& alglib::integer_2d_array::operator=(const alglib::integer_2d_array &rhs) { + return static_cast(assign(rhs)); } const alglib::ae_int_t& alglib::integer_2d_array::operator()(ae_int_t i, ae_int_t j) const { - return p_mat->ptr.pp_int[i][j]; + return ptr->ptr.pp_int[i][j]; } alglib::ae_int_t& alglib::integer_2d_array::operator()(ae_int_t i, ae_int_t j) { - return p_mat->ptr.pp_int[i][j]; + return ptr->ptr.pp_int[i][j]; } const alglib::ae_int_t* alglib::integer_2d_array::operator[](ae_int_t i) const { - return p_mat->ptr.pp_int[i]; + return ptr->ptr.pp_int[i]; } alglib::ae_int_t* alglib::integer_2d_array::operator[](ae_int_t i) { - return p_mat->ptr.pp_int[i]; + return ptr->ptr.pp_int[i]; } void alglib::integer_2d_array::setcontent(ae_int_t irows, ae_int_t icols, const ae_int_t *pContent ) { ae_int_t i, j; + + // setlength(), handle possible exception-free errors setlength(irows, icols); + if( ptr==NULL || ptr->rows!=irows || ptr->cols!=icols ) + return; + + // copy for(i=0; iptr.pp_int[i][j] = pContent[i*icols+j]; + ptr->ptr.pp_int[i][j] = pContent[i*icols+j]; +} + +#if !defined(AE_NO_EXCEPTIONS) +alglib::integer_2d_array::integer_2d_array(const char *s):ae_matrix_wrapper(s, alglib_impl::DT_INT) +{ } std::string alglib::integer_2d_array::tostring() const @@ -6900,59 +8444,98 @@ result += "]"; return result; } +#endif -alglib::real_2d_array::real_2d_array() 
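// Editorial sketch, not part of the upstream diff: a minimal usage example of
// the 2-D array wrappers reworked above (the real/complex variants below follow
// the same pattern). It assumes a build with exceptions enabled
// (AE_NO_EXCEPTIONS not defined), since the string constructor is only compiled
// in that mode; the helper function name is illustrative only.
#include "ap.h"
#include <cstdio>

static void array_wrapper_sketch()
{
    // string constructor (exception-enabled builds only)
    alglib::integer_2d_array a("[[1,2],[3,4]]");

    // setcontent() resizes the matrix and copies a row-major C buffer into it;
    // in exception-free builds it silently returns if the resize failed
    bool flags[4] = { true, false, false, true };
    alglib::boolean_2d_array b;
    b.setcontent(2, 2, flags);

    // operator= is now routed through ae_matrix_wrapper::assign(): a regular
    // target is reallocated to match, a frozen proxy requires matching sizes
    alglib::integer_2d_array c;
    c = a;

    std::printf("%ld %d\n", (long)c(1,0), (int)b(0,0));
}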
+alglib::real_2d_array::real_2d_array():ae_matrix_wrapper(alglib_impl::DT_REAL) { - allocate_own(0, 0, alglib_impl::DT_REAL); } -alglib::real_2d_array::real_2d_array(const alglib::real_2d_array &rhs) +alglib::real_2d_array::real_2d_array(const alglib::real_2d_array &rhs):ae_matrix_wrapper(rhs,alglib_impl::DT_REAL) { - create(rhs); } -alglib::real_2d_array::real_2d_array(alglib_impl::ae_matrix *p) +alglib::real_2d_array::real_2d_array(alglib_impl::ae_matrix *p):ae_matrix_wrapper(p,alglib_impl::DT_REAL) { - p_mat = NULL; - attach_to(p); } -alglib::real_2d_array::real_2d_array(const char *s) +alglib::real_2d_array::~real_2d_array() { - create(s, alglib_impl::DT_REAL); } -alglib::real_2d_array::~real_2d_array() +const alglib::real_2d_array& alglib::real_2d_array::operator=(const alglib::real_2d_array &rhs) { + return static_cast(assign(rhs)); } const double& alglib::real_2d_array::operator()(ae_int_t i, ae_int_t j) const { - return p_mat->ptr.pp_double[i][j]; + return ptr->ptr.pp_double[i][j]; } double& alglib::real_2d_array::operator()(ae_int_t i, ae_int_t j) { - return p_mat->ptr.pp_double[i][j]; + return ptr->ptr.pp_double[i][j]; } const double* alglib::real_2d_array::operator[](ae_int_t i) const { - return p_mat->ptr.pp_double[i]; + return ptr->ptr.pp_double[i]; } double* alglib::real_2d_array::operator[](ae_int_t i) { - return p_mat->ptr.pp_double[i]; + return ptr->ptr.pp_double[i]; +} + +void alglib::real_2d_array::setcontent(ae_int_t irows, ae_int_t icols, const double *pContent ) +{ + ae_int_t i, j; + + // setlength(), handle possible exception-free errors + setlength(irows, icols); + if( ptr==NULL || ptr->rows!=irows || ptr->cols!=icols ) + return; + + // copy + for(i=0; iptr.pp_double[i][j] = pContent[i*icols+j]; +} + +void alglib::real_2d_array::attach_to_ptr(ae_int_t irows, ae_int_t icols, double *pContent ) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + alglib_impl::x_matrix x; + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + ptr = NULL; + is_frozen_proxy = false; + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(!is_frozen_proxy, "ALGLIB: unable to attach proxy object to something else", &_state); + alglib_impl::ae_assert(irows>0&&icols>0, "ALGLIB: non-positive length for attach_to_ptr()", &_state); + x.rows = irows; + x.cols = icols; + x.stride = icols; + x.datatype = alglib_impl::DT_REAL; + x.owner = alglib_impl::OWN_CALLER; + x.last_action = alglib_impl::ACT_UNCHANGED; + x.x_ptr.p_ptr = pContent; + attach_to(&x, &_state); + ae_state_clear(&_state); } -void alglib::real_2d_array::setcontent(ae_int_t irows, ae_int_t icols, const double *pContent ) +#if !defined(AE_NO_EXCEPTIONS) +alglib::real_2d_array::real_2d_array(const char *s):ae_matrix_wrapper(s, alglib_impl::DT_REAL) { - ae_int_t i, j; - setlength(irows, icols); - for(i=0; iptr.pp_double[i][j] = pContent[i*icols+j]; } std::string alglib::real_2d_array::tostring(int dps) const @@ -6971,64 +8554,72 @@ result += "]"; return result; } +#endif -alglib::complex_2d_array::complex_2d_array() +alglib::complex_2d_array::complex_2d_array():ae_matrix_wrapper(alglib_impl::DT_COMPLEX) { - allocate_own(0, 0, alglib_impl::DT_COMPLEX); } -alglib::complex_2d_array::complex_2d_array(const alglib::complex_2d_array &rhs) +alglib::complex_2d_array::complex_2d_array(const alglib::complex_2d_array 
&rhs):ae_matrix_wrapper(rhs,alglib_impl::DT_COMPLEX) { - create(rhs); } -alglib::complex_2d_array::complex_2d_array(alglib_impl::ae_matrix *p) +alglib::complex_2d_array::complex_2d_array(alglib_impl::ae_matrix *p):ae_matrix_wrapper(p,alglib_impl::DT_COMPLEX) { - p_mat = NULL; - attach_to(p); } -alglib::complex_2d_array::complex_2d_array(const char *s) +alglib::complex_2d_array::~complex_2d_array() { - create(s, alglib_impl::DT_COMPLEX); } -alglib::complex_2d_array::~complex_2d_array() +const alglib::complex_2d_array& alglib::complex_2d_array::operator=(const alglib::complex_2d_array &rhs) { + return static_cast(assign(rhs)); } const alglib::complex& alglib::complex_2d_array::operator()(ae_int_t i, ae_int_t j) const { - return *((const alglib::complex*)(p_mat->ptr.pp_complex[i]+j)); + return *((const alglib::complex*)(ptr->ptr.pp_complex[i]+j)); } alglib::complex& alglib::complex_2d_array::operator()(ae_int_t i, ae_int_t j) { - return *((alglib::complex*)(p_mat->ptr.pp_complex[i]+j)); + return *((alglib::complex*)(ptr->ptr.pp_complex[i]+j)); } const alglib::complex* alglib::complex_2d_array::operator[](ae_int_t i) const { - return (const alglib::complex*)(p_mat->ptr.pp_complex[i]); + return (const alglib::complex*)(ptr->ptr.pp_complex[i]); } alglib::complex* alglib::complex_2d_array::operator[](ae_int_t i) { - return (alglib::complex*)(p_mat->ptr.pp_complex[i]); + return (alglib::complex*)(ptr->ptr.pp_complex[i]); } void alglib::complex_2d_array::setcontent(ae_int_t irows, ae_int_t icols, const alglib::complex *pContent ) { ae_int_t i, j; + + // setlength(), handle possible exception-free errors setlength(irows, icols); + if( ptr==NULL || ptr->rows!=irows || ptr->cols!=icols ) + return; + + // copy for(i=0; iptr.pp_complex[i][j].x = pContent[i*icols+j].x; - p_mat->ptr.pp_complex[i][j].y = pContent[i*icols+j].y; + ptr->ptr.pp_complex[i][j].x = pContent[i*icols+j].x; + ptr->ptr.pp_complex[i][j].y = pContent[i*icols+j].y; } } +#if !defined(AE_NO_EXCEPTIONS) +alglib::complex_2d_array::complex_2d_array(const char *s):ae_matrix_wrapper(s, alglib_impl::DT_COMPLEX) +{ +} + std::string alglib::complex_2d_array::tostring(int dps) const { std::string result; @@ -7045,7 +8636,7 @@ result += "]"; return result; } - +#endif /******************************************************************** Internal functions @@ -7116,15 +8707,21 @@ } } +#if !defined(AE_NO_EXCEPTIONS) +// +// This function filters out all spaces from the string. +// It returns string allocated with ae_malloc(). +// On allocaction failure returns NULL. 
+// char* alglib::filter_spaces(const char *s) { size_t i, n; char *r; char *r0; n = strlen(s); - r = (char*)alglib_impl::ae_malloc(n+1, NULL); + r = (char*)alglib_impl::ae_malloc(n+1,NULL); if( r==NULL ) - throw ap_error("malloc error"); + return r; for(i=0,r0=r; i<=n; i++,s++) if( !isspace(*s) ) { @@ -7142,7 +8739,7 @@ // p_vec->clear(); if( *src!='[' ) - throw alglib::ap_error("Incorrect initializer for vector"); + _ALGLIB_CPP_EXCEPTION("Incorrect initializer for vector"); src++; if( *src==']' ) return; @@ -7150,12 +8747,12 @@ for(;;) { if( *src==0 ) - throw alglib::ap_error("Incorrect initializer for vector"); + _ALGLIB_CPP_EXCEPTION("Incorrect initializer for vector"); if( *src==']' ) { if( src[1]==0 || !match_head_only) return; - throw alglib::ap_error("Incorrect initializer for vector"); + _ALGLIB_CPP_EXCEPTION("Incorrect initializer for vector"); } if( *src==',' ) { @@ -7181,17 +8778,17 @@ // Parse non-empty string // if( *src!='[' ) - throw alglib::ap_error("Incorrect initializer for matrix"); + _ALGLIB_CPP_EXCEPTION("Incorrect initializer for matrix"); src++; for(;;) { p_mat->push_back(std::vector()); str_vector_create(src, false, &p_mat->back()); if( p_mat->back().size()==0 || p_mat->back().size()!=(*p_mat)[0].size() ) - throw alglib::ap_error("Incorrect initializer for matrix"); + _ALGLIB_CPP_EXCEPTION("Incorrect initializer for matrix"); src = strchr(src, ']'); if( src==NULL ) - throw alglib::ap_error("Incorrect initializer for matrix"); + _ALGLIB_CPP_EXCEPTION("Incorrect initializer for matrix"); src++; if( *src==',' ) { @@ -7200,11 +8797,11 @@ } if( *src==']' ) break; - throw alglib::ap_error("Incorrect initializer for matrix"); + _ALGLIB_CPP_EXCEPTION("Incorrect initializer for matrix"); } src++; if( *src!=0 ) - throw alglib::ap_error("Incorrect initializer for matrix"); + _ALGLIB_CPP_EXCEPTION("Incorrect initializer for matrix"); } ae_bool alglib::parse_bool_delim(const char *s, const char *delim) @@ -7219,7 +8816,7 @@ if( my_stricmp(buf, p)==0 ) { if( s[strlen(p)]==0 || strchr(delim,s[strlen(p)])==NULL ) - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); return ae_false; } @@ -7230,12 +8827,12 @@ if( my_stricmp(buf, p)==0 ) { if( s[strlen(p)]==0 || strchr(delim,s[strlen(p)])==NULL ) - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); return ae_true; } // error - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); } alglib::ae_int_t alglib::parse_int_delim(const char *s, const char *delim) @@ -7255,18 +8852,18 @@ if( *s=='-' || *s=='+' ) s++; if( *s==0 || strchr("1234567890",*s)==NULL) - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); while( *s!=0 && strchr("1234567890",*s)!=NULL ) s++; if( *s==0 || strchr(delim,*s)==NULL ) - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); // convert and ensure that value fits into ae_int_t s = p; long_val = atol(s); ae_val = long_val; if( ae_val!=long_val ) - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); return ae_val; } @@ -7368,7 +8965,7 @@ double result; const char *new_s; if( !_parse_real_delim(s, delim, &result, &new_s) ) - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); return result; } @@ -7387,10 +8984,10 @@ { s = new_s; if( !_parse_real_delim(s, "i", &c_result.y, &new_s) ) - throw alglib::ap_error("Cannot parse value"); + 
_ALGLIB_CPP_EXCEPTION("Cannot parse value"); s = new_s+1; if( *s==0 || strchr(delim,*s)==NULL ) - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); return c_result; } @@ -7399,7 +8996,7 @@ { s = new_s+1; if( *s==0 ) - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); if( strchr(delim,*s)!=NULL ) { c_result.x = 0; @@ -7408,14 +9005,14 @@ if( strchr("+-",*s)!=NULL ) { if( !_parse_real_delim(s, delim, &c_result.x, &new_s) ) - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); return c_result; } - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); } // error - throw alglib::ap_error("Cannot parse value"); + _ALGLIB_CPP_EXCEPTION("Cannot parse value"); } std::string alglib::arraytostring(const bool *ptr, ae_int_t n) @@ -7442,7 +9039,7 @@ for(i=0; i=(int)sizeof(buf) ) - throw ap_error("arraytostring(): buffer overflow"); + _ALGLIB_CPP_EXCEPTION("arraytostring(): buffer overflow"); result += buf; } result += "]"; @@ -7459,16 +9056,16 @@ int dps = _dps>=0 ? _dps : -_dps; result = "["; if( sprintf(mask1, "%%.%d%s", dps, _dps>=0 ? "f" : "e")>=(int)sizeof(mask1) ) - throw ap_error("arraytostring(): buffer overflow"); + _ALGLIB_CPP_EXCEPTION("arraytostring(): buffer overflow"); if( sprintf(mask2, ",%s", mask1)>=(int)sizeof(mask2) ) - throw ap_error("arraytostring(): buffer overflow"); + _ALGLIB_CPP_EXCEPTION("arraytostring(): buffer overflow"); for(i=0; i=(int)sizeof(buf) ) - throw ap_error("arraytostring(): buffer overflow"); + _ALGLIB_CPP_EXCEPTION("arraytostring(): buffer overflow"); } else if( fp_isnan(ptr[i]) ) strcpy(buf, i==0 ? "NAN" : ",NAN"); @@ -7496,6 +9093,7 @@ result += "]"; return result; } +#endif /******************************************************************** @@ -7635,6 +9233,7 @@ /******************************************************************** CSV functions ********************************************************************/ +#if !defined(AE_NO_EXCEPTIONS) void alglib::read_csv(const char *filename, char separator, int flags, alglib::real_2d_array &out) { int flag; @@ -7645,542 +9244,137 @@ bool skip_first_row = (flags&CSV_SKIP_HEADERS)!=0; // - // Prepare empty output array - // - out.setlength(0,0); - - // - // Open file, determine size, read contents - // - FILE *f_in = fopen(filename, "rb"); - if( f_in==NULL ) - throw alglib::ap_error("read_csv: unable to open input file"); - flag = fseek(f_in, 0, SEEK_END); - AE_CRITICAL_ASSERT(flag==0); - long int _filesize = ftell(f_in); - AE_CRITICAL_ASSERT(_filesize>=0); - if( _filesize==0 ) - { - // empty file, return empty array, success - fclose(f_in); - return; - } - size_t filesize = _filesize; - std::vector v_buf; - v_buf.resize(filesize+2, 0); - char *p_buf = &v_buf[0]; - flag = fseek(f_in, 0, SEEK_SET); - AE_CRITICAL_ASSERT(flag==0); - size_t bytes_read = fread ((void*)p_buf, 1, filesize, f_in); - AE_CRITICAL_ASSERT(bytes_read==filesize); - fclose(f_in); - - // - // Normalize file contents: - // * replace 0x0 by spaces - // * remove trailing spaces and newlines - // * append trailing '\n' and '\0' characters - // Return if file contains only spaces/newlines. - // - for(size_t i=0; i0; ) - { - char c = p_buf[filesize-1]; - if( c==' ' || c=='\t' || c=='\n' || c=='\r' ) - { - filesize--; - continue; - } - break; - } - if( filesize==0 ) - return; - p_buf[filesize+0] = '\n'; - p_buf[filesize+1] = '\0'; - filesize+=2; - - // - // Scan dataset. 
- // - size_t rows_count = 0, cols_count = 0, max_length = 0; - std::vector offsets, lengths; - for(size_t row_start=0; p_buf[row_start]!=0x0; ) - { - // determine row length - size_t row_length; - for(row_length=0; p_buf[row_start+row_length]!='\n'; row_length++); - - // determine cols count, perform integrity check - size_t cur_cols_cnt=1; - for(size_t idx=0; idx0 && cols_count!=cur_cols_cnt ) - throw alglib::ap_error("read_csv: non-rectangular contents, rows have different sizes"); - cols_count = cur_cols_cnt; - - // store offsets and lengths of the fields - size_t cur_offs = 0; - for(size_t idx=0; idxmax_length ? idx-cur_offs : max_length; - cur_offs = idx+1; - } - - // advance row start - rows_count++; - row_start = row_start+row_length+1; - } - AE_CRITICAL_ASSERT(rows_count>=1); - AE_CRITICAL_ASSERT(cols_count>=1); - AE_CRITICAL_ASSERT(cols_count*rows_count==offsets.size()); - AE_CRITICAL_ASSERT(cols_count*rows_count==lengths.size()); - if( rows_count==1 && skip_first_row ) // empty output, return - return; - - // - // Convert - // - size_t row0 = skip_first_row ? 1 : 0; - size_t row1 = rows_count; - lconv *loc = localeconv(); - out.setlength(row1-row0, cols_count); - for(size_t ridx=row0; ridxdecimal_point; - out[ridx-row0][cidx] = atof(p_field); - } -} - -/******************************************************************** -Dataset functions -********************************************************************/ -/*bool alglib::readstrings(std::string file, std::list *pOutput) -{ - return readstrings(file, pOutput, ""); -} - -bool alglib::readstrings(std::string file, std::list *pOutput, std::string comment) -{ - std::string cmd, s; - FILE *f; - char buf[32768]; - char *str; - - f = fopen(file.c_str(), "rb"); - if( !f ) - return false; - s = ""; - pOutput->clear(); - while(str=fgets(buf, sizeof(buf), f)) - { - // TODO: read file by small chunks, combine in one large string - if( strlen(str)==0 ) - continue; - - // - // trim trailing newline chars - // - char *eos = str+strlen(str)-1; - if( *eos=='\n' ) - { - *eos = 0; - eos--; - } - if( *eos=='\r' ) - { - *eos = 0; - eos--; - } - s = str; - - // - // skip comments - // - if( comment.length()>0 ) - if( strncmp(s.c_str(), comment.c_str(), comment.length())==0 ) - { - s = ""; - continue; - } - - // - // read data - // - if( s.length()<1 ) - { - fclose(f); - throw alglib::ap_error("internal error in read_strings"); - } - pOutput->push_back(s); - } - fclose(f); - return true; -} - -void alglib::explodestring(std::string s, char sep, std::vector *pOutput) -{ - std::string tmp; - int i; - tmp = ""; - pOutput->clear(); - for(i=0; ipush_back(tmp); - tmp = ""; - } - if( tmp.length()!=0 ) - pOutput->push_back(tmp); -} - -std::string alglib::strtolower(const std::string &s) -{ - std::string r = s; - for(int i=0; i Lines; - std::vector Values, RowsArr, ColsArr, VarsArr, HeadArr; - std::list::iterator i; - std::string s; - int TrnFirst, TrnLast, ValFirst, ValLast, TstFirst, TstLast, LinesRead, j; - - // - // Read data - // - if( pdataset==NULL ) - return false; - if( !readstrings(file, &Lines, "//") ) - return false; - i = Lines.begin(); - *pdataset = dataset(); - - // - // Read header - // - if( i==Lines.end() ) - return false; - s = alglib::xtrim(*i); - alglib::explodestring(s, '#', &HeadArr); - if( HeadArr.size()!=2 ) - return false; - - // - // Rows info - // - alglib::explodestring(alglib::xtrim(HeadArr[0]), ' ', &RowsArr); - if( RowsArr.size()==0 || RowsArr.size()>3 ) - return false; - if( RowsArr.size()==1 ) - { - pdataset->totalsize = 
atol(RowsArr[0].c_str()); - pdataset->trnsize = pdataset->totalsize; - } - if( RowsArr.size()==2 ) - { - pdataset->trnsize = atol(RowsArr[0].c_str()); - pdataset->tstsize = atol(RowsArr[1].c_str()); - pdataset->totalsize = pdataset->trnsize + pdataset->tstsize; - } - if( RowsArr.size()==3 ) - { - pdataset->trnsize = atol(RowsArr[0].c_str()); - pdataset->valsize = atol(RowsArr[1].c_str()); - pdataset->tstsize = atol(RowsArr[2].c_str()); - pdataset->totalsize = pdataset->trnsize + pdataset->valsize + pdataset->tstsize; - } - if( pdataset->totalsize<=0 || pdataset->trnsize<0 || pdataset->valsize<0 || pdataset->tstsize<0 ) - return false; - TrnFirst = 0; - TrnLast = TrnFirst + pdataset->trnsize; - ValFirst = TrnLast; - ValLast = ValFirst + pdataset->valsize; - TstFirst = ValLast; - TstLast = TstFirst + pdataset->tstsize; - - // - // columns - // - alglib::explodestring(alglib::xtrim(HeadArr[1]), ' ', &ColsArr); - if( ColsArr.size()!=1 && ColsArr.size()!=4 ) - return false; - if( ColsArr.size()==1 ) - { - pdataset->nin = atoi(ColsArr[0].c_str()); - if( pdataset->nin<=0 ) - return false; - } - if( ColsArr.size()==4 ) - { - if( alglib::strtolower(ColsArr[0])!="reg" && alglib::strtolower(ColsArr[0])!="cls" ) - return false; - if( ColsArr[2]!="=>" ) - return false; - pdataset->nin = atol(ColsArr[1].c_str()); - if( pdataset->nin<1 ) - return false; - if( alglib::strtolower(ColsArr[0])=="reg" ) - { - pdataset->nclasses = 0; - pdataset->nout = atol(ColsArr[3].c_str()); - if( pdataset->nout<1 ) - return false; - } - else - { - pdataset->nclasses = atol(ColsArr[3].c_str()); - pdataset->nout = 1; - if( pdataset->nclasses<2 ) - return false; - } - } - + // Prepare empty output array // - // initialize arrays + out.setlength(0,0); + // - pdataset->all.setlength(pdataset->totalsize, pdataset->nin+pdataset->nout); - if( pdataset->trnsize>0 ) pdataset->trn.setlength(pdataset->trnsize, pdataset->nin+pdataset->nout); - if( pdataset->valsize>0 ) pdataset->val.setlength(pdataset->valsize, pdataset->nin+pdataset->nout); - if( pdataset->tstsize>0 ) pdataset->tst.setlength(pdataset->tstsize, pdataset->nin+pdataset->nout); - + // Open file, determine size, read contents + // + FILE *f_in = fopen(filename, "rb"); + if( f_in==NULL ) + _ALGLIB_CPP_EXCEPTION("read_csv: unable to open input file"); + flag = fseek(f_in, 0, SEEK_END); + AE_CRITICAL_ASSERT(flag==0); + long int _filesize = ftell(f_in); + AE_CRITICAL_ASSERT(_filesize>=0); + if( _filesize==0 ) + { + // empty file, return empty array, success + fclose(f_in); + return; + } + size_t filesize = _filesize; + std::vector v_buf; + v_buf.resize(filesize+2, 0); + char *p_buf = &v_buf[0]; + flag = fseek(f_in, 0, SEEK_SET); + AE_CRITICAL_ASSERT(flag==0); + size_t bytes_read = fread ((void*)p_buf, 1, filesize, f_in); + AE_CRITICAL_ASSERT(bytes_read==filesize); + fclose(f_in); + // - // read data + // Normalize file contents: + // * replace 0x0 by spaces + // * remove trailing spaces and newlines + // * append trailing '\n' and '\0' characters + // Return if file contains only spaces/newlines. 
// - for(LinesRead=0, i++; i!=Lines.end() && LinesReadtotalsize; i++, LinesRead++) + for(size_t i=0; i0; ) { - std::string sss = *i; - alglib::explodestring(alglib::xtrim(*i), ' ', &VarsArr); - if( VarsArr.size()!=pdataset->nin+pdataset->nout ) - return false; - int tmpc = alglib::round(atof(VarsArr[pdataset->nin+pdataset->nout-1].c_str())); - if( pdataset->nclasses>0 && (tmpc<0 || tmpc>=pdataset->nclasses) ) - return false; - for(j=0; jnin+pdataset->nout; j++) + char c = p_buf[filesize-1]; + if( c==' ' || c=='\t' || c=='\n' || c=='\r' ) { - pdataset->all(LinesRead,j) = atof(VarsArr[j].c_str()); - if( LinesRead>=TrnFirst && LinesReadtrn(LinesRead-TrnFirst,j) = atof(VarsArr[j].c_str()); - if( LinesRead>=ValFirst && LinesReadval(LinesRead-ValFirst,j) = atof(VarsArr[j].c_str()); - if( LinesRead>=TstFirst && LinesReadtst(LinesRead-TstFirst,j) = atof(VarsArr[j].c_str()); + filesize--; + continue; } + break; } - if( LinesRead!=pdataset->totalsize ) - return false; - return true; -}*/ - -/* -previous variant -bool alglib::opendataset(std::string file, dataset *pdataset) -{ - std::list Lines; - std::vector Values; - std::list::iterator i; - int nCol, nRow, nSplitted; - int nColumns, nRows; - - // - // Read data - // - if( pdataset==NULL ) - return false; - if( !readstrings(file, &Lines, "//") ) - return false; - i = Lines.begin(); - *pdataset = dataset(); - + if( filesize==0 ) + return; + p_buf[filesize+0] = '\n'; + p_buf[filesize+1] = '\0'; + filesize+=2; + // - // Read columns info + // Scan dataset. // - if( i==Lines.end() ) - return false; - if( sscanf(i->c_str(), " columns = %d %d ", &pdataset->nin, &pdataset->nout)!=2 ) - return false; - if( pdataset->nin<=0 || pdataset->nout==0 || pdataset->nout==-1) - return false; - if( pdataset->nout<0 ) - { - pdataset->nclasses = -pdataset->nout; - pdataset->nout = 1; - pdataset->iscls = true; - } - else + size_t rows_count = 0, cols_count = 0, max_length = 0; + std::vector offsets, lengths; + for(size_t row_start=0; p_buf[row_start]!=0x0; ) { - pdataset->isreg = true; + // determine row length + size_t row_length; + for(row_length=0; p_buf[row_start+row_length]!='\n'; row_length++); + + // determine cols count, perform integrity check + size_t cur_cols_cnt=1; + for(size_t idx=0; idx0 && cols_count!=cur_cols_cnt ) + _ALGLIB_CPP_EXCEPTION("read_csv: non-rectangular contents, rows have different sizes"); + cols_count = cur_cols_cnt; + + // store offsets and lengths of the fields + size_t cur_offs = 0; + for(size_t idx=0; idxmax_length ? 
idx-cur_offs : max_length; + cur_offs = idx+1; + } + + // advance row start + rows_count++; + row_start = row_start+row_length+1; } - nColumns = pdataset->nin+pdataset->nout; - i++; - - // - // Read rows info - // - if( i==Lines.end() ) - return false; - if( sscanf(i->c_str(), " rows = %d %d %d ", &pdataset->trnsize, &pdataset->valsize, &pdataset->tstsize)!=3 ) - return false; - if( (pdataset->trnsize<0) || (pdataset->valsize<0) || (pdataset->tstsize<0) ) - return false; - if( (pdataset->trnsize==0) && (pdataset->valsize==0) && (pdataset->tstsize==0) ) - return false; - nRows = pdataset->trnsize+pdataset->valsize+pdataset->tstsize; - pdataset->size = nRows; - if( Lines.size()!=nRows+2 ) - return false; - i++; - + AE_CRITICAL_ASSERT(rows_count>=1); + AE_CRITICAL_ASSERT(cols_count>=1); + AE_CRITICAL_ASSERT(cols_count*rows_count==offsets.size()); + AE_CRITICAL_ASSERT(cols_count*rows_count==lengths.size()); + if( rows_count==1 && skip_first_row ) // empty output, return + return; + // - // Read all cases + // Convert // - alglib::real_2d_array &arr = pdataset->all; - arr.setbounds(0, nRows-1, 0, nColumns-1); - for(nRow=0; nRowiscls && ((round(v)<0) || (round(v)>=pdataset->nclasses)) ) - return false; - if( (nCol==nColumns-1) && pdataset->iscls ) - arr(nRow, nCol) = round(v); - else - arr(nRow, nCol) = v; + char *p_field = p_buf+offsets[ridx*cols_count+cidx]; + size_t field_len = lengths[ridx*cols_count+cidx]; + for(size_t idx=0; idxdecimal_point; + out[ridx-row0][cidx] = atof(p_field); } - i++; - } +} +#endif - // - // Split to training, validation and test sets - // - if( pdataset->trnsize>0 ) - pdataset->trn.setbounds(0, pdataset->trnsize-1, 0, nColumns-1); - if( pdataset->valsize>0 ) - pdataset->val.setbounds(0, pdataset->valsize-1, 0, nColumns-1); - if( pdataset->tstsize>0 ) - pdataset->tst.setbounds(0, pdataset->tstsize-1, 0, nColumns-1); - nSplitted=0; - for(nRow=0; nRow<=pdataset->trnsize-1; nRow++, nSplitted++) - for(nCol=0; nCol<=nColumns-1; nCol++) - pdataset->trn(nRow,nCol) = arr(nSplitted,nCol); - for(nRow=0; nRow<=pdataset->valsize-1; nRow++, nSplitted++) - for(nCol=0; nCol<=nColumns-1; nCol++) - pdataset->val(nRow,nCol) = arr(nSplitted,nCol); - for(nRow=0; nRow<=pdataset->tstsize-1; nRow++, nSplitted++) - for(nCol=0; nCol<=nColumns-1; nCol++) - pdataset->tst(nRow,nCol) = arr(nSplitted,nCol); - return true; -}*/ -alglib::ae_int_t alglib::vlen(ae_int_t n1, ae_int_t n2) + +/******************************************************************** +Trace functions +********************************************************************/ +void alglib::trace_file(std::string tags, std::string filename) { - return n2-n1+1; + alglib_impl::ae_trace_file(tags.c_str(), filename.c_str()); +} + +void alglib::trace_disable() +{ + alglib_impl::ae_trace_disable(); } @@ -8199,9 +9393,9 @@ #define alglib_half_r_block 16 #define alglib_twice_r_block 64 -#define alglib_c_block 24 -#define alglib_half_c_block 12 -#define alglib_twice_c_block 48 +#define alglib_c_block 16 +#define alglib_half_c_block 8 +#define alglib_twice_c_block 32 /******************************************************************** @@ -9206,13 +10400,14 @@ #if defined(AE_HAS_SSE2_INTRINSICS) void _ialglib_mcopyblock_sse2(ae_int_t m, ae_int_t n, const double *a, ae_int_t op, ae_int_t stride, double *b) { - ae_int_t i, j, nb8, mb2, ntail; + ae_int_t i, j, mb2; const double *psrc0, *psrc1; double *pdst; - nb8 = n/8; - ntail = n-8*nb8; if( op==0 ) { + ae_int_t nb8, ntail; + nb8 = n/8; + ntail = n-8*nb8; for(i=0,psrc0=a; ialglib_c_block || 
k>alglib_c_block ) return ae_false; @@ -10136,8 +11331,8 @@ ae_int_t i; double _loc_abuf[alglib_r_block*alglib_r_block+alglib_simd_alignment]; double _loc_cbuf[alglib_r_block*alglib_r_block+alglib_simd_alignment]; - double * const abuf = (double * const) ae_align(_loc_abuf, alglib_simd_alignment); - double * const cbuf = (double * const) ae_align(_loc_cbuf, alglib_simd_alignment); + double * const abuf = (double *) ae_align(_loc_abuf, alglib_simd_alignment); + double * const cbuf = (double *) ae_align(_loc_cbuf, alglib_simd_alignment); if( n>alglib_r_block || k>alglib_r_block ) return ae_false; @@ -10207,9 +11402,19 @@ ae_complex *_u, ae_complex *_v) { + /* + * Locals + */ ae_complex *arow, *pu, *pv, *vtmp, *dst; ae_int_t n2 = n/2; ae_int_t i, j; + + /* + * Quick exit + */ + if( m<=0 || n<=0 ) + return ae_false; + /* * update pairs of rows @@ -10255,6 +11460,7 @@ /******************************************************************** real rank-1 kernel +deprecated version ********************************************************************/ ae_bool _ialglib_rmatrixrank1(ae_int_t m, ae_int_t n, @@ -10263,6 +11469,9 @@ double *_u, double *_v) { + /* + * Locals + */ double *arow0, *arow1, *pu, *pv, *vtmp, *dst0, *dst1; ae_int_t m2 = m/2; ae_int_t n2 = n/2; @@ -10271,6 +11480,12 @@ ae_int_t i, j; /* + * Quick exit + */ + if( m<=0 || n<=0 ) + return ae_false; + + /* * update pairs of rows */ arow0 = _a; @@ -10324,6 +11539,93 @@ } + +/******************************************************************** +real rank-1 kernel +deprecated version +********************************************************************/ +ae_bool _ialglib_rmatrixger(ae_int_t m, + ae_int_t n, + double *_a, + ae_int_t _a_stride, + double alpha, + double *_u, + double *_v) +{ + /* + * Locals + */ + double *arow0, *arow1, *pu, *pv, *vtmp, *dst0, *dst1; + ae_int_t m2 = m/2; + ae_int_t n2 = n/2; + ae_int_t stride = _a_stride; + ae_int_t stride2 = 2*_a_stride; + ae_int_t i, j; + + /* + * Quick exit + */ + if( m<=0 || n<=0 || alpha==0.0 ) + return ae_false; + + /* + * update pairs of rows + */ + arow0 = _a; + arow1 = arow0+stride; + pu = _u; + vtmp = _v; + for(i=0; iptr.pp_double[ia]+ja, _a->stride, optypea, _b->ptr.pp_double[ib]+jb, _b->stride, optypeb, beta, _c->ptr.pp_double[ic]+jc, _c->stride); } @@ -10364,6 +11671,11 @@ ae_int_t ic, ae_int_t jc) { + /* handle degenerate cases like zero matrices by ALGLIB - greatly simplifies passing data to ALGLIB kernel */ + if( (alpha.x==0.0 && alpha.y==0) || k==0 || n==0 || m==0 ) + return ae_false; + + /* handle with optimized ALGLIB kernel */ return _ialglib_cmatrixgemm(m, n, k, alpha, _a->ptr.pp_complex[ia]+ja, _a->stride, optypea, _b->ptr.pp_complex[ib]+jb, _b->stride, optypeb, beta, _c->ptr.pp_complex[ic]+jc, _c->stride); } @@ -10379,6 +11691,11 @@ ae_int_t i2, ae_int_t j2) { + /* handle degenerate cases like zero matrices by ALGLIB - greatly simplifies passing data to ALGLIB kernel */ + if( m==0 || n==0) + return ae_false; + + /* handle with optimized ALGLIB kernel */ return _ialglib_cmatrixrighttrsm(m, n, &a->ptr.pp_complex[i1][j1], a->stride, isupper, isunit, optype, &x->ptr.pp_complex[i2][j2], x->stride); } @@ -10394,6 +11711,11 @@ ae_int_t i2, ae_int_t j2) { + /* handle degenerate cases like zero matrices by ALGLIB - greatly simplifies passing data to ALGLIB kernel */ + if( m==0 || n==0) + return ae_false; + + /* handle with optimized ALGLIB kernel */ return _ialglib_rmatrixrighttrsm(m, n, &a->ptr.pp_double[i1][j1], a->stride, isupper, isunit, optype, &x->ptr.pp_double[i2][j2], 
x->stride); } @@ -10409,6 +11731,11 @@ ae_int_t i2, ae_int_t j2) { + /* handle degenerate cases like zero matrices by ALGLIB - greatly simplifies passing data to ALGLIB kernel */ + if( m==0 || n==0) + return ae_false; + + /* handle with optimized ALGLIB kernel */ return _ialglib_cmatrixlefttrsm(m, n, &a->ptr.pp_complex[i1][j1], a->stride, isupper, isunit, optype, &x->ptr.pp_complex[i2][j2], x->stride); } @@ -10424,6 +11751,11 @@ ae_int_t i2, ae_int_t j2) { + /* handle degenerate cases like zero matrices by ALGLIB - greatly simplifies passing data to ALGLIB kernel */ + if( m==0 || n==0) + return ae_false; + + /* handle with optimized ALGLIB kernel */ return _ialglib_rmatrixlefttrsm(m, n, &a->ptr.pp_double[i1][j1], a->stride, isupper, isunit, optype, &x->ptr.pp_double[i2][j2], x->stride); } @@ -10440,6 +11772,11 @@ ae_int_t jc, ae_bool isupper) { + /* handle degenerate cases like zero matrices by ALGLIB - greatly simplifies passing data to ALGLIB kernel */ + if( alpha==0.0 || k==0 || n==0) + return ae_false; + + /* ALGLIB kernel */ return _ialglib_cmatrixherk(n, k, alpha, &a->ptr.pp_complex[ia][ja], a->stride, optypea, beta, &c->ptr.pp_complex[ic][jc], c->stride, isupper); } @@ -10456,6 +11793,11 @@ ae_int_t jc, ae_bool isupper) { + /* handle degenerate cases like zero matrices by ALGLIB - greatly simplifies passing data to ALGLIB kernel */ + if( alpha==0.0 || k==0 || n==0) + return ae_false; + + /* ALGLIB kernel */ return _ialglib_rmatrixsyrk(n, k, alpha, &a->ptr.pp_double[ia][ja], a->stride, optypea, beta, &c->ptr.pp_double[ic][jc], c->stride, isupper); } @@ -10485,6 +11827,20 @@ return _ialglib_rmatrixrank1(m, n, &a->ptr.pp_double[ia][ja], a->stride, &u->ptr.p_double[uoffs], &v->ptr.p_double[voffs]); } +ae_bool _ialglib_i_rmatrixgerf(ae_int_t m, + ae_int_t n, + ae_matrix *a, + ae_int_t ia, + ae_int_t ja, + double alpha, + ae_vector *u, + ae_int_t uoffs, + ae_vector *v, + ae_int_t voffs) +{ + return _ialglib_rmatrixger(m, n, &a->ptr.pp_double[ia][ja], a->stride, alpha, &u->ptr.p_double[uoffs], &v->ptr.p_double[voffs]); +} + diff -Nru alglib-3.10.0/src/ap.h alglib-3.16.0/src/ap.h --- alglib-3.10.0/src/ap.h 2015-08-19 12:24:23.000000000 +0000 +++ alglib-3.16.0/src/ap.h 2019-12-19 10:28:27.000000000 +0000 @@ -1,5 +1,5 @@ /************************************************************************* -ALGLIB 3.10.0 (source code generated 2015-08-19) +ALGLIB 3.16.0 (source code generated 2019-12-19) Copyright (c) Sergey Bochkanov (ALGLIB project). 
>>> SOURCE LICENSE >>> @@ -25,6 +25,7 @@ #include #include #include +#include #include #if defined(__CODEGEARC__) @@ -41,21 +42,43 @@ #define AE_USE_CPP /* Definitions */ #define AE_UNKNOWN 0 -#define AE_MSVC 1 -#define AE_GNUC 2 -#define AE_SUNC 3 #define AE_INTEL 1 #define AE_SPARC 2 -#define AE_WINDOWS 1 -#define AE_POSIX 2 -#define AE_LOCK_ALIGNMENT 16 -/* in case no OS is defined, use AE_UNKNOWN */ -#ifndef AE_OS +/* OS definitions */ +#define AE_WINDOWS 1 +#define AE_POSIX 2 +#define AE_LINUX 304 +#if !defined(AE_OS) #define AE_OS AE_UNKNOWN #endif +#if AE_OS==AE_LINUX +#undef AE_OS +#define AE_OS AE_POSIX +#define _ALGLIB_USE_LINUX_EXTENSIONS +#endif + +/* threading models for AE_THREADING */ +#define AE_PARALLEL 100 +#define AE_SERIAL 101 +#define AE_SERIAL_UNSAFE 102 +#if !defined(AE_THREADING) +#define AE_THREADING AE_PARALLEL +#endif + +/* malloc types for AE_MALLOC */ +#define AE_STDLIB_MALLOC 200 +#define AE_BASIC_STATIC_MALLOC 201 +#if !defined(AE_MALLOC) +#define AE_MALLOC AE_STDLIB_MALLOC +#endif + +#define AE_LOCK_ALIGNMENT 16 /* automatically determine compiler */ +#define AE_MSVC 1 +#define AE_GNUC 2 +#define AE_SUNC 3 #define AE_COMPILER AE_UNKNOWN #ifdef __GNUC__ #undef AE_COMPILER @@ -79,9 +102,19 @@ #define ALIGNED #endif +/* state flags */ +#define _ALGLIB_FLG_THREADING_MASK 0x7 +#define _ALGLIB_FLG_THREADING_SHIFT 0 +#define _ALGLIB_FLG_THREADING_USE_GLOBAL 0x0 +#define _ALGLIB_FLG_THREADING_SERIAL 0x1 +#define _ALGLIB_FLG_THREADING_PARALLEL 0x2 + + /* now we are ready to include headers */ +#include #include #include +#include #include #include #include @@ -141,8 +174,8 @@ /* if we work under C++ environment, define several conditions */ #ifdef AE_USE_CPP #define AE_USE_CPP_BOOL -#define AE_USE_CPP_ERROR_HANDLING #define AE_USE_CPP_SERIALIZATION +#include #endif /* @@ -179,6 +212,21 @@ #endif #endif +#if defined(AE_UINT64_T) +typedef AE_UINT64_T ae_uint64_t; +#endif +#if defined(AE_HAVE_STDINT) && !defined(AE_UINT64_T) +typedef uint64_t ae_uint64_t; +#endif +#if !defined(AE_HAVE_STDINT) && !defined(AE_UINT64_T) +#if AE_COMPILER==AE_MSVC +typedef unsigned __int64 ae_uint64_t; +#endif +#if (AE_COMPILER==AE_GNUC) || (AE_COMPILER==AE_SUNC) || (AE_COMPILER==AE_UNKNOWN) +typedef unsigned long long ae_uint64_t; +#endif +#endif + #if !defined(AE_INT_T) typedef ptrdiff_t ae_int_t; #endif @@ -210,7 +258,7 @@ */ enum { OWN_CALLER=1, OWN_AE=2 }; enum { ACT_UNCHANGED=1, ACT_SAME_LOCATION=2, ACT_NEW_LOCATION=3 }; -enum { DT_BOOL=1, DT_INT=2, DT_REAL=3, DT_COMPLEX=4 }; +enum { DT_BOOL=1, DT_BYTE=1, DT_INT=2, DT_REAL=3, DT_COMPLEX=4 }; enum { CPU_SSE2=1 }; /************************************************************************ @@ -261,11 +309,15 @@ ************************************************************************/ typedef struct { - ALIGNED ae_int64_t cnt; - ALIGNED ae_int64_t datatype; - ALIGNED ae_int64_t owner; - ALIGNED ae_int64_t last_action; - ALIGNED void *ptr; + ae_int64_t cnt; + ae_int64_t datatype; + ae_int64_t owner; + ae_int64_t last_action; + union + { + void *p_ptr; + ae_int64_t portable_alignment_enforcer; + } x_ptr; } x_vector; @@ -298,13 +350,17 @@ ************************************************************************/ typedef struct { - ALIGNED ae_int64_t rows; - ALIGNED ae_int64_t cols; - ALIGNED ae_int64_t stride; - ALIGNED ae_int64_t datatype; - ALIGNED ae_int64_t owner; - ALIGNED ae_int64_t last_action; - ALIGNED void *ptr; + ae_int64_t rows; + ae_int64_t cols; + ae_int64_t stride; + ae_int64_t datatype; + ae_int64_t owner; + ae_int64_t 
last_action; + union + { + void *p_ptr; + ae_int64_t portable_alignment_enforcer; + } x_ptr; } x_matrix; @@ -319,6 +375,18 @@ may be null (for zero-size block), DYN_BOTTOM or DYN_FRAME for "special" blocks (frame/stack boundaries). +valgrind_hint is a special field which stores a special hint pointer for + Valgrind and other similar memory checking tools. ALGLIB + manually aligns pointers obtained via malloc, so ptr usually + points to location past the beginning of the actuallly + allocated memory. In such cases memory testing tools may + report "(possibly) lost" memory. + + This "hint" field stores pointer actually returned by + malloc (or NULL, if for some reason we do not support + this feature). This field is used merely as a hint for + Valgrind - it should NOT be used for anything else. + ************************************************************************/ typedef struct ae_dyn_block { @@ -326,8 +394,11 @@ /* void *deallocator; */ void (*deallocator)(void*); void * volatile ptr; + void* valgrind_hint; } ae_dyn_block; +typedef void(*ae_deallocator)(void*); + /************************************************************************ frame marker ************************************************************************/ @@ -369,11 +440,9 @@ ae_dyn_block last_block; /* - * jmp_buf for cases when C-style exception handling is used + * jmp_buf pointer for internal C-style exception handling */ -#ifndef AE_USE_CPP_ERROR_HANDLING jmp_buf * volatile break_jump; -#endif /* * ae_error_type of the last error (filled when exception is thrown) @@ -386,6 +455,11 @@ const char* volatile error_msg; /* + * Flags: call-local settings for ALGLIB + */ + ae_uint64_t flags; + + /* * threading information: * a) current thread pool * b) current worker thread @@ -402,8 +476,46 @@ } ae_state; /************************************************************************ -Serializer +Serializer: + +* ae_stream_writer type is a function pointer for stream writer method; + this pointer is used by X-core for out-of-core serialization (say, to + serialize ALGLIB structure directly to managed C# stream). + + This function accepts two parameters: pointer to ANSI (7-bit) string + and pointer-sized integer passed to serializer during initialization. + String being passed is a part of the data stream; aux paramerer may be + arbitrary value intended to be used by actual implementation of stream + writer. String parameter may include spaces and linefeed symbols, it + should be written to stream as is. + + Return value must be zero for success or non-zero for failure. + +* ae_stream_reader type is a function pointer for stream reader method; + this pointer is used by X-core for out-of-core unserialization (say, to + unserialize ALGLIB structure directly from managed C# stream). + + This function accepts three parameters: pointer-sized integer passed to + serializer during initialization; number of symbols to read from + stream; pointer to buffer used to store next token read from stream + (ANSI encoding is used, buffer is large enough to store all symbols and + trailing zero symbol). + + Number of symbols to read is always positive. 
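// Editorial sketch, not part of the upstream diff: plausible FILE*-based
// implementations of the ae_stream_writer/ae_stream_reader contract described
// in this comment (the bulleted reader requirements continue right below this
// sketch). The aux argument is treated as a pointer-sized handle carrying the
// FILE*; the helper names are illustrative only. Such callbacks would be passed
// to ae_serializer_sstart_stream()/ae_serializer_ustart_stream(), declared
// further down in this header.
#include "ap.h"
#include <cstdio>
#include <cctype>

static char file_writer(const char *p_string, alglib_impl::ae_int_t aux)
{
    FILE *f = reinterpret_cast<FILE*>(aux);
    return std::fputs(p_string, f)>=0 ? 0 : 1;        // zero means success
}

static char file_reader(alglib_impl::ae_int_t aux, alglib_impl::ae_int_t cnt, char *p_buf)
{
    FILE *f = reinterpret_cast<FILE*>(aux);
    int c;

    // skip spaces and linefeeds preceding the token
    do { c = std::fgetc(f); } while( c==' ' || c=='\t' || c=='\n' || c=='\r' );

    // read exactly cnt non-space, non-linefeed symbols
    for(alglib_impl::ae_int_t i=0; i<cnt; i++)
    {
        if( c==EOF || std::isspace(c) )
            return 1;                                 // malformed stream
        p_buf[i] = (char)c;
        if( i+1<cnt )
            c = std::fgetc(f);
    }

    // append trailing zero (the buffer is guaranteed to be large enough)
    p_buf[cnt] = 0;
    return 0;
}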
+ + After being called by X-core, this function must: + * skip all space and linefeed characters from the current position at + the stream and until first non-space non-linefeed character is found + * read exactly cnt symbols from stream to buffer; check that all + symbols being read are non-space non-linefeed ones + * append trailing zero symbol to buffer + * return value must be zero on success, non-zero if even one of the + conditions above fails. When reader returns non-zero value, contents + of buf is not used. ************************************************************************/ +typedef char(*ae_stream_writer)(const char *p_string, ae_int_t aux); +typedef char(*ae_stream_reader)(ae_int_t aux, ae_int_t cnt, char *p_buf); + typedef struct { ae_int_t mode; @@ -415,11 +527,13 @@ #ifdef AE_USE_CPP_SERIALIZATION std::string *out_cppstr; #endif - char *out_str; - const char *in_str; + char *out_str; /* pointer to the current position at the output buffer; advanced with each write operation */ + const char *in_str; /* pointer to the current position at the input buffer; advanced with each read operation */ + ae_int_t stream_aux; + ae_stream_writer stream_writer; + ae_stream_reader stream_reader; } ae_serializer; -typedef void(*ae_deallocator)(void*); typedef struct ae_vector { @@ -429,14 +543,14 @@ ae_int_t cnt; /* - * Either DT_BOOL, DT_INT, DT_REAL or DT_COMPLEX + * Either DT_BOOL/DT_BYTE, DT_INT, DT_REAL or DT_COMPLEX */ ae_datatype datatype; /* * If ptr points to memory owned and managed by ae_vector itself, * this field is ae_false. If vector was attached to x_vector structure - * with ae_vector_attach_to_x(), this field is ae_true. + * with ae_vector_init_attach_to_x(), this field is ae_true. */ ae_bool is_attached; @@ -455,6 +569,7 @@ { void *p_ptr; ae_bool *p_bool; + unsigned char *p_ubyte; ae_int_t *p_int; double *p_double; ae_complex *p_complex; @@ -471,7 +586,7 @@ /* * If ptr points to memory owned and managed by ae_vector itself, * this field is ae_false. If vector was attached to x_vector structure - * with ae_vector_attach_to_x(), this field is ae_true. + * with ae_vector_init_attach_to_x(), this field is ae_true. */ ae_bool is_attached; @@ -529,7 +644,22 @@ * Pointer to _lock structure. This pointer has type void* in order to * make header file OS-independent (lock declaration depends on OS). */ - void *ptr; + void *lock_ptr; + + /* + * For eternal=false this field manages pointer to _lock structure. + * + * ae_dyn_block structure is responsible for automatic deletion of + * the memory allocated for the pointer when its frame is destroyed. + */ + ae_dyn_block db; + + /* + * Whether we have eternal lock object (used by thread pool) or + * transient lock. Eternal locks are allocated without using ae_dyn_block + * structure and do not allow deallocation. 
+ */ + ae_bool eternal; } ae_lock; @@ -575,10 +705,10 @@ ae_int_t size_of_object; /* initializer function; accepts pointer to malloc'ed object, initializes its fields */ - void (*init)(void* dst, ae_state* state); + void (*init)(void* dst, ae_state* state, ae_bool make_automatic); /* copy constructor; accepts pointer to malloc'ed, but not initialized object */ - void (*init_copy)(void* dst, void* src, ae_state* state); + void (*init_copy)(void* dst, void* src, ae_state* state, ae_bool make_automatic); /* destructor function; */ void (*destroy)(void* ptr); @@ -586,65 +716,100 @@ /* frame entry; contains pointer to the pool object itself */ ae_dyn_block frame_entry; } ae_shared_pool; - + +void ae_never_call_it(); +void ae_set_dbg_flag(ae_int64_t flag_id, ae_int64_t flag_val); +ae_int64_t ae_get_dbg_value(ae_int64_t id); +void ae_set_global_threading(ae_uint64_t flg_value); +ae_uint64_t ae_get_global_threading(); + +/************************************************************************ +Debugging and tracing functions +************************************************************************/ +void ae_set_error_flag(ae_bool *p_flag, ae_bool cond, const char *filename, int lineno, const char *xdesc); +const char * ae_get_last_error_file(); +int ae_get_last_error_line(); +const char * ae_get_last_error_xdesc(); + +void ae_trace_file(const char *tags, const char *filename); +void ae_trace_disable(); +ae_bool ae_is_trace_enabled(const char *tag); +void ae_trace(const char * printf_fmt, ...); + +/************************************************************************ +... +************************************************************************/ ae_int_t ae_misalignment(const void *ptr, size_t alignment); void* ae_align(void *ptr, size_t alignment); +ae_int_t ae_get_effective_workers(ae_int_t nworkers); +void ae_optional_atomic_add_i(ae_int_t *p, ae_int_t v); +void ae_optional_atomic_sub_i(ae_int_t *p, ae_int_t v); + void* aligned_malloc(size_t size, size_t alignment); +void* aligned_extract_ptr(void *block); void aligned_free(void *block); +void* eternal_malloc(size_t size); +#if AE_MALLOC==AE_BASIC_STATIC_MALLOC +void set_memory_pool(void *ptr, size_t size); +void memory_pool_stats(ae_int_t *bytes_used, ae_int_t *bytes_free); +#endif void* ae_malloc(size_t size, ae_state *state); void ae_free(void *p); ae_int_t ae_sizeof(ae_datatype datatype); +ae_bool ae_check_zeros(const void *ptr, ae_int_t n); void ae_touch_ptr(void *p); void ae_state_init(ae_state *state); void ae_state_clear(ae_state *state); -#ifndef AE_USE_CPP_ERROR_HANDLING void ae_state_set_break_jump(ae_state *state, jmp_buf *buf); -#endif +void ae_state_set_flags(ae_state *state, ae_uint64_t flags); +void ae_clean_up_before_breaking(ae_state *state); void ae_break(ae_state *state, ae_error_type error_type, const char *msg); void ae_frame_make(ae_state *state, ae_frame *tmp); void ae_frame_leave(ae_state *state); void ae_db_attach(ae_dyn_block *block, ae_state *state); -ae_bool ae_db_malloc(ae_dyn_block *block, ae_int_t size, ae_state *state, ae_bool make_automatic); -ae_bool ae_db_realloc(ae_dyn_block *block, ae_int_t size, ae_state *state); +void ae_db_init(ae_dyn_block *block, ae_int_t size, ae_state *state, ae_bool make_automatic); +void ae_db_realloc(ae_dyn_block *block, ae_int_t size, ae_state *state); void ae_db_free(ae_dyn_block *block); void ae_db_swap(ae_dyn_block *block1, ae_dyn_block *block2); -void ae_vector_init(ae_vector *dst, ae_int_t size, ae_datatype datatype, ae_state *state); -void ae_vector_init_copy(ae_vector 
*dst, ae_vector *src, ae_state *state); -void ae_vector_init_from_x(ae_vector *dst, x_vector *src, ae_state *state); -void ae_vector_attach_to_x(ae_vector *dst, x_vector *src, ae_state *state); -ae_bool ae_vector_set_length(ae_vector *dst, ae_int_t newsize, ae_state *state); +void ae_vector_init(ae_vector *dst, ae_int_t size, ae_datatype datatype, ae_state *state, ae_bool make_automatic); +void ae_vector_init_copy(ae_vector *dst, ae_vector *src, ae_state *state, ae_bool make_automatic); +void ae_vector_init_from_x(ae_vector *dst, x_vector *src, ae_state *state, ae_bool make_automatic); +void ae_vector_init_attach_to_x(ae_vector *dst, x_vector *src, ae_state *state, ae_bool make_automatic); +void ae_vector_set_length(ae_vector *dst, ae_int_t newsize, ae_state *state); +void ae_vector_resize(ae_vector *dst, ae_int_t newsize, ae_state *state); void ae_vector_clear(ae_vector *dst); void ae_vector_destroy(ae_vector *dst); void ae_swap_vectors(ae_vector *vec1, ae_vector *vec2); -void ae_matrix_init(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_datatype datatype, ae_state *state); -void ae_matrix_init_copy(ae_matrix *dst, ae_matrix *src, ae_state *state); -void ae_matrix_init_from_x(ae_matrix *dst, x_matrix *src, ae_state *state); -void ae_matrix_attach_to_x(ae_matrix *dst, x_matrix *src, ae_state *state); -ae_bool ae_matrix_set_length(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_state *state); +void ae_matrix_init(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_datatype datatype, ae_state *state, ae_bool make_automatic); +void ae_matrix_init_copy(ae_matrix *dst, ae_matrix *src, ae_state *state, ae_bool make_automatic); +void ae_matrix_init_from_x(ae_matrix *dst, x_matrix *src, ae_state *state, ae_bool make_automatic); +void ae_matrix_init_attach_to_x(ae_matrix *dst, x_matrix *src, ae_state *state, ae_bool make_automatic); +void ae_matrix_set_length(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_state *state); void ae_matrix_clear(ae_matrix *dst); void ae_matrix_destroy(ae_matrix *dst); void ae_swap_matrices(ae_matrix *mat1, ae_matrix *mat2); -void ae_smart_ptr_init(ae_smart_ptr *dst, void **subscriber, ae_state *state); +void ae_smart_ptr_init(ae_smart_ptr *dst, void **subscriber, ae_state *state, ae_bool make_automatic); void ae_smart_ptr_clear(void *_dst); /* accepts ae_smart_ptr* */ void ae_smart_ptr_destroy(void *_dst); void ae_smart_ptr_assign(ae_smart_ptr *dst, void *new_ptr, ae_bool is_owner, ae_bool is_dynamic, void (*destroy)(void*)); void ae_smart_ptr_release(ae_smart_ptr *dst); void ae_yield(); -void ae_init_lock(ae_lock *lock); +void ae_init_lock(ae_lock *lock, ae_state *state, ae_bool make_automatic); +void ae_init_lock_eternal(ae_lock *lock); void ae_acquire_lock(ae_lock *lock); void ae_release_lock(ae_lock *lock); void ae_free_lock(ae_lock *lock); -void ae_shared_pool_init(void *_dst, ae_state *state); -void ae_shared_pool_init_copy(void *_dst, void *_src, ae_state *state); +void ae_shared_pool_init(void *_dst, ae_state *state, ae_bool make_automatic); +void ae_shared_pool_init_copy(void *_dst, void *_src, ae_state *state, ae_bool make_automatic); void ae_shared_pool_clear(void *dst); void ae_shared_pool_destroy(void *dst); ae_bool ae_shared_pool_is_initialized(void *_dst); @@ -652,8 +817,8 @@ ae_shared_pool *dst, void *seed_object, ae_int_t size_of_object, - void (*init)(void* dst, ae_state* state), - void (*init_copy)(void* dst, void* src, ae_state* state), + void (*init)(void* dst, ae_state* state, ae_bool make_automatic), + void (*init_copy)(void* dst, void* src, 
ae_state* state, ae_bool make_automatic), void (*destroy)(void* ptr), ae_state *state); void ae_shared_pool_retrieve( @@ -700,23 +865,32 @@ void ae_serializer_alloc_start(ae_serializer *serializer); void ae_serializer_alloc_entry(ae_serializer *serializer); +void ae_serializer_alloc_byte_array(ae_serializer *serializer, ae_vector *bytes); ae_int_t ae_serializer_get_alloc_size(ae_serializer *serializer); #ifdef AE_USE_CPP_SERIALIZATION void ae_serializer_sstart_str(ae_serializer *serializer, std::string *buf); void ae_serializer_ustart_str(ae_serializer *serializer, const std::string *buf); +void ae_serializer_sstart_stream(ae_serializer *serializer, std::ostream *stream); +void ae_serializer_ustart_stream(ae_serializer *serializer, const std::istream *stream); #endif void ae_serializer_sstart_str(ae_serializer *serializer, char *buf); void ae_serializer_ustart_str(ae_serializer *serializer, const char *buf); +void ae_serializer_sstart_stream(ae_serializer *serializer, ae_stream_writer writer, ae_int_t aux); +void ae_serializer_ustart_stream(ae_serializer *serializer, ae_stream_reader reader, ae_int_t aux); void ae_serializer_serialize_bool(ae_serializer *serializer, ae_bool v, ae_state *state); void ae_serializer_serialize_int(ae_serializer *serializer, ae_int_t v, ae_state *state); +void ae_serializer_serialize_int64(ae_serializer *serializer, ae_int64_t v, ae_state *state); void ae_serializer_serialize_double(ae_serializer *serializer, double v, ae_state *state); +void ae_serializer_serialize_byte_array(ae_serializer *serializer, ae_vector *bytes, ae_state *state); void ae_serializer_unserialize_bool(ae_serializer *serializer, ae_bool *v, ae_state *state); void ae_serializer_unserialize_int(ae_serializer *serializer, ae_int_t *v, ae_state *state); +void ae_serializer_unserialize_int64(ae_serializer *serializer, ae_int64_t *v, ae_state *state); void ae_serializer_unserialize_double(ae_serializer *serializer, double *v, ae_state *state); +void ae_serializer_unserialize_byte_array(ae_serializer *serializer, ae_vector *bytes, ae_state *state); -void ae_serializer_stop(ae_serializer *serializer); +void ae_serializer_stop(ae_serializer *serializer, ae_state *state); /************************************************************************ Service functions @@ -869,20 +1043,47 @@ ae_vector ra; ae_vector ca; } rcommstate; -void _rcommstate_init(rcommstate* p, ae_state *_state); -void _rcommstate_init_copy(rcommstate* dst, rcommstate* src, ae_state *_state); +void _rcommstate_init(rcommstate* p, ae_state *_state, ae_bool make_automatic); +void _rcommstate_init_copy(rcommstate* dst, rcommstate* src, ae_state *_state, ae_bool make_automatic); void _rcommstate_clear(rcommstate* p); void _rcommstate_destroy(rcommstate* p); + /************************************************************************ -Allocation counter, inactive by default. +Allocation counters, inactive by default. Turned on when needed for debugging purposes. + +_alloc_counter is incremented by 1 on malloc(), decremented on free(). +_alloc_counter_total is only incremented by 1. ************************************************************************/ -extern ae_int64_t _alloc_counter; +extern ae_int_t _alloc_counter; +extern ae_int_t _alloc_counter_total; extern ae_bool _use_alloc_counter; /************************************************************************ +Malloc debugging: + +* _force_malloc_failure - set this flag to ae_true in order to enforce + failure of ALGLIB malloc(). 
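// Editorial sketch, not part of the upstream diff: how the allocation counters
// declared above can be used for leak checking in a debug build (the
// description of _force_malloc_failure continues right below this sketch).
// The helper name is illustrative; the counters live in namespace alglib_impl.
#include "ap.h"
#include <cstdio>

static void check_for_alglib_leaks()
{
    alglib_impl::_use_alloc_counter = true;           // start counting allocations
    {
        alglib::real_1d_array tmp;                    // any ALGLIB object
        tmp.setlength(16);
    }                                                 // destructor frees memory here
    // every ae_malloc() should have been paired with ae_free() by now
    std::printf("live ALGLIB allocations: %ld\n", (long)alglib_impl::_alloc_counter);
}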
Useful to debug handling of errors during + memory allocation. As long as this flag is set, ALGLIB malloc will fail. +* _malloc_failure_after - set it to non-zero value in order to enforce + malloc failure as soon as _alloc_counter_total increases above value of + this variable. This value has no effect if _use_alloc_counter is not + set. +************************************************************************/ +extern ae_bool _force_malloc_failure; +extern ae_int_t _malloc_failure_after; + + +/************************************************************************ +Trace file descriptor (to be used by ALGLIB code which sends messages to +trace log) +************************************************************************/ +extern FILE *alglib_trace_file; + + +/************************************************************************ debug functions (must be turned on by preprocessor definitions): * tickcount(), which is wrapper around GetTickCount() * flushconsole(), flushes console @@ -926,6 +1127,7 @@ /******************************************************************** Exception class. ********************************************************************/ +#if !defined(AE_NO_EXCEPTIONS) class ap_error { public: @@ -937,6 +1139,7 @@ static void make_assertion(bool bClause, const char *p_msg); private: }; +#endif /******************************************************************** Complex number with double precision. @@ -964,14 +1167,16 @@ alglib_impl::ae_complex* c_ptr(); const alglib_impl::ae_complex* c_ptr() const; +#if !defined(AE_NO_EXCEPTIONS) std::string tostring(int dps) const; +#endif double x, y; }; const alglib::complex operator/(const alglib::complex& lhs, const alglib::complex& rhs); -const bool operator==(const alglib::complex& lhs, const alglib::complex& rhs); -const bool operator!=(const alglib::complex& lhs, const alglib::complex& rhs); +bool operator==(const alglib::complex& lhs, const alglib::complex& rhs); +bool operator!=(const alglib::complex& lhs, const alglib::complex& rhs); const alglib::complex operator+(const alglib::complex& lhs); const alglib::complex operator-(const alglib::complex& lhs); const alglib::complex operator+(const alglib::complex& lhs, const alglib::complex& rhs); @@ -989,7 +1194,6 @@ double abscomplex(const alglib::complex &z); alglib::complex conj(const alglib::complex &z); alglib::complex csqr(const alglib::complex &z); -void setnworkers(alglib::ae_int_t nworkers); /******************************************************************** Level 1 BLAS functions @@ -1071,10 +1275,37 @@ void vmul(alglib::complex *vdst, ae_int_t N, alglib::complex alpha); +/******************************************************************** +xparams type and several predefined constants +********************************************************************/ +struct xparams +{ + alglib_impl::ae_uint64_t flags; +}; + +extern const xparams &xdefault; +extern const xparams &serial; +extern const xparams &parallel; + +/******************************************************************** +Threading functions +********************************************************************/ +// nworkers can be 1, 2, ... ; or 0 for auto; or -1/-2/... for all except for one/two/... +void setnworkers(alglib::ae_int_t nworkers); + +// sets global threading settings to alglib::serial or alglib::parallel +void setglobalthreading(const xparams settings); + +// nworkers can be 1, 2, ... ; or 0 for auto; or -1/-2/... for all except for one/two/... 
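A minimal usage sketch of the threading controls declared just above and below (setnworkers, setglobalthreading, getnworkers). The include path and the helper function name are illustrative assumptions, and in the free edition these settings may have no practical effect:

#include "ap.h"   // assumed include; adjust to the installed header layout

void configure_threading_sketch()
{
    // Request "all cores except one"; 0 would mean automatic selection, 1 strictly serial.
    alglib::setnworkers(-1);

    // Make serial execution the global default; calls that accept an xparams argument
    // may still override it with alglib::parallel or alglib::xdefault.
    alglib::setglobalthreading(alglib::serial);

    alglib::ae_int_t n = alglib::getnworkers();   // query the current worker setting
    (void)n;
}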
+alglib::ae_int_t getnworkers(); /******************************************************************** -string conversion functions !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +internal functions used by test_x.cpp, interfaces for functions present +in commercial ALGLIB but lacking in free edition. ********************************************************************/ +ae_int_t _ae_cores_count(); +void _ae_set_global_threading(alglib_impl::ae_uint64_t flg_value); +alglib_impl::ae_uint64_t _ae_get_global_threading(); /******************************************************************** 1- and 2-dimensional arrays @@ -1082,40 +1313,86 @@ class ae_vector_wrapper { public: - ae_vector_wrapper(); + // + // Creates object attached to external ae_vector structure. + // + // NOTE: this function also checks that source ae_vector* has + // required datatype. An exception is generated otherwise. + // + ae_vector_wrapper(alglib_impl::ae_vector *e_ptr, alglib_impl::ae_datatype datatype); + + // + // Creates zero-size vector of specific datatype + // + ae_vector_wrapper(alglib_impl::ae_datatype datatype); + + // + // Creates a copy of another vector (can be reference to one of the derived classes) + // + // NOTE: this function also checks that source ae_vector* has + // required datatype. An exception is generated otherwise. + // + ae_vector_wrapper(const ae_vector_wrapper &rhs, alglib_impl::ae_datatype datatype); + + // + // Well, it is destructor... + // virtual ~ae_vector_wrapper(); + // + // For wrapper object allocated with allocate_own() this function + // changes length, completely dropping previous contents. + // + // It does not work (throws exception) for frozen proxy objects. + // void setlength(ae_int_t iLen); + + // + // Element count + // ae_int_t length() const; - - void attach_to(alglib_impl::ae_vector *ptr); - void allocate_own(ae_int_t size, alglib_impl::ae_datatype datatype); + + // + // Access to internal C-structure used by C-core. + // Not intended for external use. + // const alglib_impl::ae_vector* c_ptr() const; alglib_impl::ae_vector* c_ptr(); private: + ae_vector_wrapper(); ae_vector_wrapper(const ae_vector_wrapper &rhs); const ae_vector_wrapper& operator=(const ae_vector_wrapper &rhs); protected: - // - // Copies source vector RHS into current object. - // - // Current object is considered empty (this function should be - // called from copy constructor). - // - void create(const ae_vector_wrapper &rhs); - +#if !defined(AE_NO_EXCEPTIONS) // // Copies array given by string into current object. Additional // parameter DATATYPE contains information about type of the data // in S and type of the array to create. // - // Current object is considered empty (this function should be - // called from copy constructor). + // NOTE: this function is not supported in exception-free mode. // - void create(const char *s, alglib_impl::ae_datatype datatype); - + ae_vector_wrapper(const char *s, alglib_impl::ae_datatype datatype); +#endif + // - // Assigns RHS to current object. + // This function attaches wrapper object to external x_vector structure; + // "frozen proxy" mode is activated (you can read/write, but can not reallocate + // and do not own memory of the vector). + // + // NOTE: initial state of wrapper object is assumed to be initialized; + // all previously allocated memory is properly deallocated. + // + // NOTE: x_vector structure pointed by new_ptr is used only once; after + // we fetch pointer to memory and its size, this structure is ignored + // and not referenced anymore. 
So, you can pass pointers to temporary + // x-structures which are deallocated immediately after you call attach_to() + // + // NOTE: state structure is used for error reporting purposes (longjmp on errors). + // + void attach_to(alglib_impl::x_vector *new_ptr, alglib_impl::ae_state *_state); + + // + // Assigns RHS to current object. Returns *this. // // It has several branches depending on target object status: // * in case it is proxy object, data are copied into memory pointed by @@ -1126,17 +1403,43 @@ // // NOTE: this function correctly handles assignments of the object to itself. // - void assign(const ae_vector_wrapper &rhs); + const ae_vector_wrapper& assign(const ae_vector_wrapper &rhs); + + // + // Pointer to ae_vector structure: + // * ptr==&inner_vec means that wrapper object owns ae_vector structure and + // is responsible for proper deallocation of its memory + // * ptr!=&inner_vec means that wrapper object works with someone's other + // ae_vector record and is not responsible for its memory; in this case + // inner_vec is assumed to be uninitialized. + // + alglib_impl::ae_vector *ptr; + + // + // Inner ae_vector record. + // Ignored for ptr!=&inner_rec. + // + alglib_impl::ae_vector inner_vec; - alglib_impl::ae_vector *p_vec; - alglib_impl::ae_vector vec; + // + // Whether this wrapper object is frozen proxy (you may read array, may + // modify its value, but can not deallocate its memory or resize it) or not. + // + // If is_frozen_proxy==true and if: + // * ptr==&inner_vec, it means that wrapper works with its own ae_vector + // structure, but this structure points to externally allocated memory. + // This memory is NOT owned by ae_vector object. + // * ptr!=&inner_vec, it means that wrapper works with externally allocated + // and managed ae_vector structure. Both memory pointed by ae_vector and + // ae_vector structure itself are not owned by wrapper object. + // + bool is_frozen_proxy; }; class boolean_1d_array : public ae_vector_wrapper { public: boolean_1d_array(); - boolean_1d_array(const char *s); boolean_1d_array(const boolean_1d_array &rhs); boolean_1d_array(alglib_impl::ae_vector *p); const boolean_1d_array& operator=(const boolean_1d_array &rhs); @@ -1148,18 +1451,29 @@ const ae_bool& operator[](ae_int_t i) const; ae_bool& operator[](ae_int_t i); + // + // This function allocates array[iLen] and copies data + // pointed by pContent to its memory. Completely independent + // copy of data is created. + // void setcontent(ae_int_t iLen, const bool *pContent ); + + // + // This function returns pointer to internal memory + // ae_bool* getcontent(); const ae_bool* getcontent() const; +#if !defined(AE_NO_EXCEPTIONS) + boolean_1d_array(const char *s); std::string tostring() const; +#endif }; class integer_1d_array : public ae_vector_wrapper { public: integer_1d_array(); - integer_1d_array(const char *s); integer_1d_array(const integer_1d_array &rhs); integer_1d_array(alglib_impl::ae_vector *p); const integer_1d_array& operator=(const integer_1d_array &rhs); @@ -1171,19 +1485,29 @@ const ae_int_t& operator[](ae_int_t i) const; ae_int_t& operator[](ae_int_t i); + // + // This function allocates array[iLen] and copies data + // pointed by pContent to its memory. Completely independent + // copy of data is created. 
+ // void setcontent(ae_int_t iLen, const ae_int_t *pContent ); - + + // + // This function returns pointer to internal memory + // ae_int_t* getcontent(); const ae_int_t* getcontent() const; +#if !defined(AE_NO_EXCEPTIONS) + integer_1d_array(const char *s); std::string tostring() const; +#endif }; class real_1d_array : public ae_vector_wrapper { public: real_1d_array(); - real_1d_array(const char *s); real_1d_array(const real_1d_array &rhs); real_1d_array(alglib_impl::ae_vector *p); const real_1d_array& operator=(const real_1d_array &rhs); @@ -1195,18 +1519,41 @@ const double& operator[](ae_int_t i) const; double& operator[](ae_int_t i); - void setcontent(ae_int_t iLen, const double *pContent ); + // + // This function allocates array[iLen] and copies data + // pointed by pContent to its memory. Completely independent + // copy of data is created. + // + void setcontent(ae_int_t iLen, const double *pContent); + + // + // This function attaches array to memory pointed by pContent. + // No own memory is allocated, no copying of data is performed, + // so pContent pointer should be valid as long as we work with + // array. + // + // After you attach array object to external memory, it becomes + // "frozen": it is possible to read/write array elements, but + // it is not allowed to resize it (no setlength() calls). + // + void attach_to_ptr(ae_int_t iLen, double *pContent); + + // + // This function returns pointer to internal memory + // double* getcontent(); const double* getcontent() const; +#if !defined(AE_NO_EXCEPTIONS) + real_1d_array(const char *s); std::string tostring(int dps) const; +#endif }; class complex_1d_array : public ae_vector_wrapper { public: complex_1d_array(); - complex_1d_array(const char *s); complex_1d_array(const complex_1d_array &rhs); complex_1d_array(alglib_impl::ae_vector *p); const complex_1d_array& operator=(const complex_1d_array &rhs); @@ -1218,19 +1565,46 @@ const alglib::complex& operator[](ae_int_t i) const; alglib::complex& operator[](ae_int_t i); + // + // This function allocates array[iLen] and copies data + // pointed by pContent to its memory. Completely independent + // copy of data is created. + // void setcontent(ae_int_t iLen, const alglib::complex *pContent ); alglib::complex* getcontent(); const alglib::complex* getcontent() const; +#if !defined(AE_NO_EXCEPTIONS) + complex_1d_array(const char *s); std::string tostring(int dps) const; +#endif }; class ae_matrix_wrapper { public: - ae_matrix_wrapper(); + // + // Creates object attached to external ae_vector structure, with additional + // check for matching datatypes (e_ptr->datatype==datatype is required). + // + ae_matrix_wrapper(alglib_impl::ae_matrix *e_ptr, alglib_impl::ae_datatype datatype); + + // + // Creates zero-sized matrix of specified datatype. + // + ae_matrix_wrapper(alglib_impl::ae_datatype datatype); + + // + // Creates copy of rhs, with additional check for matching datatypes + // (rhs.datatype==datatype is required). 
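A small sketch contrasting setcontent() (independent copy) with attach_to_ptr() ("frozen proxy" over caller-owned memory) for the 1-D wrappers declared above; the include path and helper name are illustrative. real_2d_array provides the analogous attach_to_ptr(irows, icols, pContent), as declared further below.

#include "ap.h"   // assumed include; adjust to the installed header layout

void vector_wrapper_sketch()
{
    double buf[3] = {1.0, 2.0, 3.0};

    // setcontent() allocates array[3] and copies buf; later changes to buf do not affect a.
    alglib::real_1d_array a;
    a.setcontent(3, buf);
    buf[0] = 99.0;                       // a[0] is still 1.0

    // attach_to_ptr() performs no copy: b reads and writes buf directly, and b is
    // "frozen" - elements may be modified, but setlength() is not allowed.
    alglib::real_1d_array b;
    b.attach_to_ptr(3, buf);
    b[1] = 7.5;                          // writes into buf[1]

    alglib::ae_int_t n = a.length();     // element count
    (void)n;
}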
+ // + ae_matrix_wrapper(const ae_matrix_wrapper &rhs, alglib_impl::ae_datatype datatype); + + // + // Destructor + // virtual ~ae_matrix_wrapper(); - const ae_matrix_wrapper& operator=(const ae_matrix_wrapper &rhs); + void setlength(ae_int_t rows, ae_int_t cols); ae_int_t rows() const; @@ -1238,30 +1612,51 @@ bool isempty() const; ae_int_t getstride() const; - void attach_to(alglib_impl::ae_matrix *ptr); - void allocate_own(ae_int_t rows, ae_int_t cols, alglib_impl::ae_datatype datatype); const alglib_impl::ae_matrix* c_ptr() const; alglib_impl::ae_matrix* c_ptr(); private: + ae_matrix_wrapper(); ae_matrix_wrapper(const ae_matrix_wrapper &rhs); + const ae_matrix_wrapper& operator=(const ae_matrix_wrapper &rhs); protected: +#if !defined(AE_NO_EXCEPTIONS) // - // Copies source matrix RHS into current object. + // Copies array given by string into current object. Additional + // parameter DATATYPE contains information about type of the data + // in S and type of the array to create. // // Current object is considered empty (this function should be // called from copy constructor). // - void create(const ae_matrix_wrapper &rhs); + ae_matrix_wrapper(const char *s, alglib_impl::ae_datatype datatype); +#endif // - // Copies array given by string into current object. Additional - // parameter DATATYPE contains information about type of the data - // in S and type of the array to create. + // This function attaches wrapper object to external x_vector structure; + // "frozen proxy" mode is activated (you can read/write, but can not reallocate + // and do not own memory of the vector). // - // Current object is considered empty (this function should be - // called from copy constructor). + // NOTE: initial state of wrapper object is assumed to be initialized; + // all previously allocated memory is properly deallocated. + // + // NOTE: x_vector structure pointed by new_ptr is used only once; after + // we fetch pointer to memory and its size, this structure is ignored + // and not referenced anymore. So, you can pass pointers to temporary + // x-structures which are deallocated immediately after you call attach_to() + // + // NOTE: state structure is used for error-handling (a longjmp is performed + // on allocation error). All previously allocated memory is correctly + // freed on error. // - void create(const char *s, alglib_impl::ae_datatype datatype); + void attach_to(alglib_impl::x_matrix *new_ptr, alglib_impl::ae_state *_state); + + // + // This function initializes matrix and allocates own memory storage. + // + // NOTE: initial state of wrapper object is assumed to be uninitialized; + // if ptr!=NULL on entry, it is considered critical error (abort is called). + // + void init(ae_int_t rows, ae_int_t cols, alglib_impl::ae_datatype datatype, alglib_impl::ae_state *_state); // // Assigns RHS to current object. @@ -1275,10 +1670,38 @@ // // NOTE: this function correctly handles assignments of the object to itself. // - void assign(const ae_matrix_wrapper &rhs); + const ae_matrix_wrapper & assign(const ae_matrix_wrapper &rhs); + + + // + // Pointer to ae_matrix structure: + // * ptr==&inner_mat means that wrapper object owns ae_matrix structure and + // is responsible for proper deallocation of its memory + // * ptr!=&inner_mat means that wrapper object works with someone's other + // ae_matrix record and is not responsible for its memory; in this case + // inner_mat is assumed to be uninitialized. 
+ // + alglib_impl::ae_matrix *ptr; - alglib_impl::ae_matrix *p_mat; - alglib_impl::ae_matrix mat; + // + // Inner ae_matrix record. + // Ignored for ptr!=&inner_mat. + // + alglib_impl::ae_matrix inner_mat; + + // + // Whether this wrapper object is frozen proxy (you may read array, may + // modify its value, but can not deallocate its memory or resize it) or not. + // + // If is_frozen_proxy==true and if: + // * ptr==&inner_vec, it means that wrapper works with its own ae_vector + // structure, but this structure points to externally allocated memory. + // This memory is NOT owned by ae_vector object. + // * ptr!=&inner_vec, it means that wrapper works with externally allocated + // and managed ae_vector structure. Both memory pointed by ae_vector and + // ae_vector structure itself are not owned by wrapper object. + // + bool is_frozen_proxy; }; class boolean_2d_array : public ae_matrix_wrapper @@ -1287,18 +1710,27 @@ boolean_2d_array(); boolean_2d_array(const boolean_2d_array &rhs); boolean_2d_array(alglib_impl::ae_matrix *p); - boolean_2d_array(const char *s); virtual ~boolean_2d_array(); + + const boolean_2d_array& operator=(const boolean_2d_array &rhs); const ae_bool& operator()(ae_int_t i, ae_int_t j) const; ae_bool& operator()(ae_int_t i, ae_int_t j); const ae_bool* operator[](ae_int_t i) const; ae_bool* operator[](ae_int_t i); - + + // + // This function allocates array[irows,icols] and copies data + // pointed by pContent to its memory. Completely independent + // copy of data is created. + // void setcontent(ae_int_t irows, ae_int_t icols, const bool *pContent ); +#if !defined(AE_NO_EXCEPTIONS) + boolean_2d_array(const char *s); std::string tostring() const ; +#endif }; class integer_2d_array : public ae_matrix_wrapper @@ -1307,8 +1739,9 @@ integer_2d_array(); integer_2d_array(const integer_2d_array &rhs); integer_2d_array(alglib_impl::ae_matrix *p); - integer_2d_array(const char *s); virtual ~integer_2d_array(); + + const integer_2d_array& operator=(const integer_2d_array &rhs); const ae_int_t& operator()(ae_int_t i, ae_int_t j) const; ae_int_t& operator()(ae_int_t i, ae_int_t j); @@ -1316,9 +1749,18 @@ const ae_int_t* operator[](ae_int_t i) const; ae_int_t* operator[](ae_int_t i); + // + // This function allocates array[irows,icols] and copies data + // pointed by pContent to its memory. Completely independent + // copy of data is created. + // void setcontent(ae_int_t irows, ae_int_t icols, const ae_int_t *pContent ); + +#if !defined(AE_NO_EXCEPTIONS) + integer_2d_array(const char *s); std::string tostring() const; +#endif }; class real_2d_array : public ae_matrix_wrapper @@ -1327,8 +1769,9 @@ real_2d_array(); real_2d_array(const real_2d_array &rhs); real_2d_array(alglib_impl::ae_matrix *p); - real_2d_array(const char *s); virtual ~real_2d_array(); + + const real_2d_array& operator=(const real_2d_array &rhs); const double& operator()(ae_int_t i, ae_int_t j) const; double& operator()(ae_int_t i, ae_int_t j); @@ -1336,9 +1779,30 @@ const double* operator[](ae_int_t i) const; double* operator[](ae_int_t i); - void setcontent(ae_int_t irows, ae_int_t icols, const double *pContent ); + // + // This function allocates array[irows,icols] and copies data + // pointed by pContent to its memory. Completely independent + // copy of data is created. 
+ // + void setcontent(ae_int_t irows, ae_int_t icols, const double *pContent); + + // + // This function attaches array to memory pointed by pContent: + // * only minor amount of own memory is allocated - O(irows) bytes to + // store precomputed pointers; but no costly copying of O(rows*cols) + // data is performed. + // * pContent pointer should be valid as long as we work with array + // + // After you attach array object to external memory, it becomes + // "frozen": it is possible to read/write array elements, but + // it is not allowed to resize it (no setlength() calls). + // + void attach_to_ptr(ae_int_t irows, ae_int_t icols, double *pContent); +#if !defined(AE_NO_EXCEPTIONS) + real_2d_array(const char *s); std::string tostring(int dps) const; +#endif }; class complex_2d_array : public ae_matrix_wrapper @@ -1347,8 +1811,9 @@ complex_2d_array(); complex_2d_array(const complex_2d_array &rhs); complex_2d_array(alglib_impl::ae_matrix *p); - complex_2d_array(const char *s); virtual ~complex_2d_array(); + + const complex_2d_array& operator=(const complex_2d_array &rhs); const alglib::complex& operator()(ae_int_t i, ae_int_t j) const; alglib::complex& operator()(ae_int_t i, ae_int_t j); @@ -1356,9 +1821,17 @@ const alglib::complex* operator[](ae_int_t i) const; alglib::complex* operator[](ae_int_t i); + // + // This function allocates array[irows,icols] and copies data + // pointed by pContent to its memory. Completely independent + // copy of data is created. + // void setcontent(ae_int_t irows, ae_int_t icols, const alglib::complex *pContent ); +#if !defined(AE_NO_EXCEPTIONS) + complex_2d_array(const char *s); std::string tostring(int dps) const; +#endif }; /******************************************************************** @@ -1403,46 +1876,42 @@ * field contents is not recognized by atof() - field value is replaced by 0.0 ********************************************************************/ +#if !defined(AE_NO_EXCEPTIONS) void read_csv(const char *filename, char separator, int flags, alglib::real_2d_array &out); +#endif /******************************************************************** -dataset information. +This function activates trace output, with trace log being saved to +file (appended to the end). -can store regression dataset, classification dataset, or non-labeled -task: -* nout==0 means non-labeled task (clustering, for example) -* nout>0 && nclasses==0 means regression task -* nout>0 && nclasses>0 means classification task +Tracing allows us to study behavior of ALGLIB solvers and to debug +their failures: +* tracing is limited by one/several ALGLIB parts specified by means + of trace tags, like "SLP" (for SLP solver) or "OPTGUARD" (OptGuard + integrity checker). +* some ALGLIB solvers support hierarchies of trace tags which activate + different kinds of tracing. Say, "SLP" defines some basic tracing, + but "SLP.PROBING" defines more detailed and costly tracing. +* generally, "TRACETAG.SUBTAG" also implicitly activates logging + which is activated by "TRACETAG" +* you may define multiple trace tags by separating them with commas, + like "SLP,OPTGUARD,SLP.PROBING" +* trace tags are case-insensitive +* spaces/tabs are NOT allowed in the tags string + +Trace log is saved to file "filename", which is opened in the append +mode. If no file with such name can be opened, tracing won't be +performed (but no exception will be generated). 
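As a minimal illustration of the tag syntax above (the tag set and file name are arbitrary examples, and the enclosing helper is assumed, not part of ALGLIB; the alglib headers are presumed included):

    void tracing_sketch()
    {
        // append-mode log; "SLP.PROBING" would also implicitly enable "SLP"
        alglib::trace_file("SLP,OPTGUARD", "alglib_trace.log");
        // ... run the solver being studied ...
        alglib::trace_disable();          // stop tracing when done
    }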
********************************************************************/ -/*class dataset -{ -public: - dataset():nin(0), nout(0), nclasses(0), trnsize(0), valsize(0), tstsize(0), totalsize(0){}; +void trace_file(std::string tags, std::string filename); - int nin, nout, nclasses; - int trnsize; - int valsize; - int tstsize; - int totalsize; - - alglib::real_2d_array trn; - alglib::real_2d_array val; - alglib::real_2d_array tst; - alglib::real_2d_array all; -}; - -bool opendataset(std::string file, dataset *pdataset); +/******************************************************************** +This function disables tracing. +********************************************************************/ +void trace_disable(); -// -// internal functions -// -std::string strtolower(const std::string &s); -bool readstrings(std::string file, std::list<std::string> *pOutput); -bool readstrings(std::string file, std::list<std::string> *pOutput, std::string comment); -void explodestring(std::string s, char sep, std::vector<std::string> *pOutput); -std::string xtrim(std::string s);*/ /******************************************************************** Constants and functions introduced for compatibility with AlgoPascal @@ -1484,10 +1953,47 @@ bool fp_isinf(double x); bool fp_isfinite(double x); +/******************************************************************** +Exception handling macros +********************************************************************/ +#if !defined(AE_NO_EXCEPTIONS) +/////////////////////////////////////// +// exception-based code +////////////////////////////// +#define _ALGLIB_CPP_EXCEPTION(msg) throw alglib::ap_error(msg) +#define _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN try{ +#define _ALGLIB_CALLBACK_EXCEPTION_GUARD_END }catch(...){ ae_clean_up_before_breaking(&_alglib_env_state); throw; } + +#else + +/////////////////////////////////////// +// Exception-free version
////////////////////////////// +#if AE_OS!=AE_UNKNOWN +#error Exception-free mode can not be combined with AE_OS definition +#endif +#if AE_THREADING!=AE_SERIAL_UNSAFE +#error Exception-free mode is thread-unsafe; define AE_THREADING=AE_SERIAL_UNSAFE to prove that you know it +#endif +#define _ALGLIB_CALLBACK_EXCEPTION_GUARD_BEGIN +#define _ALGLIB_CALLBACK_EXCEPTION_GUARD_END +#define _ALGLIB_SET_ERROR_FLAG(s) set_error_flag(s) + +// sets error flag and (optionally) sets error message +void set_error_flag(const char *s = NULL); + +// returns error flag and optionally returns error message (loaded to *p_msg); +// if error flag is not set (or p_msg is NULL) *p_msg is not changed.
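A sketch of the checking pattern implied by these declarations; it applies only when ALGLIB is built with AE_NO_EXCEPTIONS, and the wrapper function name is illustrative:

void check_last_call_sketch()
{
    const char *msg = NULL;
    if( alglib::get_error_flag(&msg) )
    {
        // A preceding ALGLIB call failed; msg may point to a description (or remain NULL).
        // The flag stays set until cleared explicitly, so reset it before further calls.
        alglib::clear_error_flag();
    }
}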
+bool get_error_flag(const char **p_msg = NULL); + +// clears error flag (it is not cleared until explicit call to this function) +void clear_error_flag(); +#endif }//namespace alglib + ///////////////////////////////////////////////////////////////////////// // // THIS SECTIONS CONTAINS DECLARATIONS FOR OPTIMIZED LINEAR ALGEBRA CODES @@ -1626,6 +2132,16 @@ ae_int_t uoffs, ae_vector *v, ae_int_t voffs); +ae_bool _ialglib_i_rmatrixgerf(ae_int_t m, + ae_int_t n, + ae_matrix *a, + ae_int_t ia, + ae_int_t ja, + double alpha, + ae_vector *u, + ae_int_t uoffs, + ae_vector *v, + ae_int_t voffs); @@ -1644,5 +2160,2302 @@ } +///////////////////////////////////////////////////////////////////////// +// +// THIS SECTION CONTAINS DEFINITIONS FOR PARTIAL COMPILATION +// +///////////////////////////////////////////////////////////////////////// +#ifdef AE_COMPILE_SCODES +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_APSERV +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_TSORT +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#endif + +#ifdef AE_COMPILE_NEARESTNEIGHBOR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_SCODES +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#endif + +#ifdef AE_COMPILE_HQRND +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#endif + +#ifdef AE_COMPILE_XDEBUG +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_ODESOLVER +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#endif + +#ifdef AE_COMPILE_ABLASMKL +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_SPARSE +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#endif + +#ifdef AE_COMPILE_ABLASF +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_ABLAS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#endif + +#ifdef AE_COMPILE_DLU +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#endif + +#ifdef AE_COMPILE_SPTRF +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#endif + +#ifdef AE_COMPILE_CREFLECTIONS +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_MATGEN +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#endif + +#ifdef AE_COMPILE_ROTATIONS +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_TRFAC +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#endif + +#ifdef AE_COMPILE_TRLINSOLVE +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_SAFESOLVE +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_RCOND +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define 
AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#endif + +#ifdef AE_COMPILE_MATINV +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#endif + +#ifdef AE_COMPILE_HBLAS +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_SBLAS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#endif + +#ifdef AE_COMPILE_ORTFAC +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#endif + +#ifdef AE_COMPILE_FBLS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#endif + +#ifdef AE_COMPILE_CQMODELS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#endif + +#ifdef AE_COMPILE_OPTGUARDAPI +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#endif + +#ifdef AE_COMPILE_BLAS +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_BDSVD +#define AE_PARTIAL_BUILD +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#endif + +#ifdef AE_COMPILE_SVD +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_BDSVD +#endif + +#ifdef AE_COMPILE_OPTSERV +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#endif + +#ifdef AE_COMPILE_SNNLS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS 
+#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#endif + +#ifdef AE_COMPILE_SACTIVESETS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#define AE_COMPILE_SNNLS +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#endif + +#ifdef AE_COMPILE_QQPSOLVER +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#endif + +#ifdef AE_COMPILE_LINMIN +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_XBLAS +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_DIRECTDENSESOLVERS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_MATGEN +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_XBLAS +#endif + +#ifdef AE_COMPILE_LPQPSERV +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#endif + +#ifdef AE_COMPILE_VIPMSOLVER +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define 
AE_COMPILE_MATGEN +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_FBLS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_MATINV +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_LPQPSERV +#endif + +#ifdef AE_COMPILE_NLCSQP +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_FBLS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_VIPMSOLVER +#endif + +#ifdef AE_COMPILE_MINLBFGS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#endif + +#ifdef AE_COMPILE_NORMESTIMATOR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#endif + +#ifdef AE_COMPILE_LINLSQR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#endif + +#ifdef AE_COMPILE_QPDENSEAULSOLVER +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS 
+#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_LINMIN +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_QQPSOLVER +#endif + +#ifdef AE_COMPILE_MINBLEIC +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#endif + +#ifdef AE_COMPILE_QPBLEICSOLVER +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_LINMIN +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_MINBLEIC +#endif + +#ifdef AE_COMPILE_MINQP +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_LINMIN +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_QQPSOLVER +#define AE_COMPILE_QPDENSEAULSOLVER +#define AE_COMPILE_MINBLEIC +#define AE_COMPILE_QPBLEICSOLVER +#define AE_COMPILE_VIPMSOLVER 
+#endif + +#ifdef AE_COMPILE_REVISEDDUALSIMPLEX +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#endif + +#ifdef AE_COMPILE_MINLP +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_REVISEDDUALSIMPLEX +#endif + +#ifdef AE_COMPILE_NLCSLP +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_REVISEDDUALSIMPLEX +#endif + +#ifdef AE_COMPILE_MINNLC +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_SNNLS +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_MINBLEIC +#define AE_COMPILE_REVISEDDUALSIMPLEX +#define AE_COMPILE_NLCSLP +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_VIPMSOLVER +#define AE_COMPILE_NLCSQP +#endif + +#ifdef AE_COMPILE_MINBC +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define 
AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#endif + +#ifdef AE_COMPILE_MINNS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_SNNLS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_MINBLEIC +#endif + +#ifdef AE_COMPILE_MINCOMP +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_MINBLEIC +#endif + +#ifdef AE_COMPILE_MINCG +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#endif + +#ifdef AE_COMPILE_MINLM +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_NORMESTIMATOR +#define 
AE_COMPILE_LINLSQR +#define AE_COMPILE_LINMIN +#define AE_COMPILE_FBLS +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_QQPSOLVER +#define AE_COMPILE_QPDENSEAULSOLVER +#define AE_COMPILE_MINBLEIC +#define AE_COMPILE_QPBLEICSOLVER +#define AE_COMPILE_VIPMSOLVER +#define AE_COMPILE_MINQP +#endif + +#ifdef AE_COMPILE_HSSCHUR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_BLAS +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#endif + +#ifdef AE_COMPILE_BASICSTATOPS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#endif + +#ifdef AE_COMPILE_EVD +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_MATGEN +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_HSSCHUR +#define AE_COMPILE_BASICSTATOPS +#endif + +#ifdef AE_COMPILE_BASESTAT +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#endif + +#ifdef AE_COMPILE_PCA +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_MATGEN +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_HSSCHUR +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_EVD +#define AE_COMPILE_BASESTAT +#endif + +#ifdef AE_COMPILE_BDSS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_BASESTAT +#endif + +#ifdef AE_COMPILE_HPCCORES +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_MLPBASE +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_BASESTAT +#define AE_COMPILE_BDSS +#define AE_COMPILE_HPCCORES +#define AE_COMPILE_SCODES +#define AE_COMPILE_HQRND +#define AE_COMPILE_SPARSE +#endif + +#ifdef AE_COMPILE_LDA +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_MATGEN +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_HSSCHUR +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_EVD +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#endif + +#ifdef AE_COMPILE_SSA 
+#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_MATGEN +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_HSSCHUR +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_EVD +#endif + +#ifdef AE_COMPILE_GAMMAFUNC +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_NORMALDISTR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#endif + +#ifdef AE_COMPILE_IGAMMAF +#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#endif + +#ifdef AE_COMPILE_LINREG +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_BASESTAT +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IGAMMAF +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#endif + +#ifdef AE_COMPILE_FILTERS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_BASESTAT +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IGAMMAF +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_LINREG +#endif + +#ifdef AE_COMPILE_LOGIT +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_BASESTAT +#define AE_COMPILE_BDSS +#define AE_COMPILE_HPCCORES +#define AE_COMPILE_SCODES +#define AE_COMPILE_HQRND +#define AE_COMPILE_SPARSE +#define AE_COMPILE_MLPBASE +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_MATGEN +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#endif + +#ifdef AE_COMPILE_MCPD +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_LINMIN +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define 
AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_MINBLEIC +#endif + +#ifdef AE_COMPILE_MLPE +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_BASESTAT +#define AE_COMPILE_BDSS +#define AE_COMPILE_HPCCORES +#define AE_COMPILE_SCODES +#define AE_COMPILE_HQRND +#define AE_COMPILE_SPARSE +#define AE_COMPILE_MLPBASE +#endif + +#ifdef AE_COMPILE_MLPTRAIN +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_BASESTAT +#define AE_COMPILE_BDSS +#define AE_COMPILE_HPCCORES +#define AE_COMPILE_SCODES +#define AE_COMPILE_HQRND +#define AE_COMPILE_SPARSE +#define AE_COMPILE_MLPBASE +#define AE_COMPILE_MLPE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_LINMIN +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#endif + +#ifdef AE_COMPILE_CLUSTERING +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_BLAS +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_BASESTAT +#endif + +#ifdef AE_COMPILE_DFOREST +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_SCODES +#define AE_COMPILE_TSORT +#define AE_COMPILE_HQRND +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_BASESTAT +#define AE_COMPILE_BDSS +#endif + +#ifdef AE_COMPILE_KNN +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_SCODES +#define AE_COMPILE_TSORT +#define AE_COMPILE_HQRND +#define AE_COMPILE_NEARESTNEIGHBOR +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_BASESTAT +#define AE_COMPILE_BDSS +#endif + +#ifdef AE_COMPILE_DATACOMP +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_BLAS +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_BASESTAT +#define AE_COMPILE_CLUSTERING +#endif + +#ifdef AE_COMPILE_GQ +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define 
AE_COMPILE_MATGEN +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_HSSCHUR +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_EVD +#define AE_COMPILE_GAMMAFUNC +#endif + +#ifdef AE_COMPILE_GKQ +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_HSSCHUR +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_EVD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_GQ +#endif + +#ifdef AE_COMPILE_AUTOGK +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_HSSCHUR +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_EVD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_GQ +#define AE_COMPILE_GKQ +#endif + +#ifdef AE_COMPILE_NTHEORY +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_FTBASE +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_NTHEORY +#endif + +#ifdef AE_COMPILE_FFT +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_NTHEORY +#define AE_COMPILE_FTBASE +#endif + +#ifdef AE_COMPILE_FHT +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_NTHEORY +#define AE_COMPILE_FTBASE +#define AE_COMPILE_FFT +#endif + +#ifdef AE_COMPILE_CONV +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_NTHEORY +#define AE_COMPILE_FTBASE +#define AE_COMPILE_FFT +#endif + +#ifdef AE_COMPILE_CORR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_NTHEORY +#define AE_COMPILE_FTBASE +#define AE_COMPILE_FFT +#define AE_COMPILE_CONV +#endif + +#ifdef AE_COMPILE_IDW +#define AE_PARTIAL_BUILD +#define AE_COMPILE_SCODES +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_NEARESTNEIGHBOR +#define AE_COMPILE_HQRND +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#endif + +#ifdef AE_COMPILE_RATINT +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#endif + +#ifdef AE_COMPILE_FITSPHERE +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS 
+#define AE_COMPILE_MINBLEIC +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_QQPSOLVER +#define AE_COMPILE_QPDENSEAULSOLVER +#define AE_COMPILE_QPBLEICSOLVER +#define AE_COMPILE_VIPMSOLVER +#define AE_COMPILE_MINQP +#define AE_COMPILE_MINLM +#define AE_COMPILE_REVISEDDUALSIMPLEX +#define AE_COMPILE_NLCSLP +#define AE_COMPILE_NLCSQP +#define AE_COMPILE_MINNLC +#endif + +#ifdef AE_COMPILE_INTFITSERV +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#endif + +#ifdef AE_COMPILE_SPLINE1D +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_INTFITSERV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_LINLSQR +#endif + +#ifdef AE_COMPILE_PARAMETRIC +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_INTFITSERV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_SPLINE1D +#define AE_COMPILE_HSSCHUR +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_EVD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_GQ +#define AE_COMPILE_GKQ +#define AE_COMPILE_AUTOGK +#endif + +#ifdef AE_COMPILE_SPLINE3D +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_INTFITSERV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_SPLINE1D +#endif + +#ifdef AE_COMPILE_POLINT +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_RATINT +#endif + +#ifdef AE_COMPILE_LSFIT +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define 
AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_INTFITSERV +#define AE_COMPILE_RATINT +#define AE_COMPILE_POLINT +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_SPLINE1D +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_LINMIN +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_QQPSOLVER +#define AE_COMPILE_QPDENSEAULSOLVER +#define AE_COMPILE_MINBLEIC +#define AE_COMPILE_QPBLEICSOLVER +#define AE_COMPILE_VIPMSOLVER +#define AE_COMPILE_MINQP +#define AE_COMPILE_MINLM +#endif + +#ifdef AE_COMPILE_RBFV2 +#define AE_PARTIAL_BUILD +#define AE_COMPILE_SCODES +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_NEARESTNEIGHBOR +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_INTFITSERV +#define AE_COMPILE_RATINT +#define AE_COMPILE_POLINT +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_SPLINE1D +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_LINMIN +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_QQPSOLVER +#define AE_COMPILE_QPDENSEAULSOLVER +#define AE_COMPILE_MINBLEIC +#define AE_COMPILE_QPBLEICSOLVER +#define AE_COMPILE_VIPMSOLVER +#define AE_COMPILE_MINQP +#define AE_COMPILE_MINLM +#define AE_COMPILE_LSFIT +#endif + +#ifdef AE_COMPILE_SPLINE2D +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_SCODES +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_INTFITSERV +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_FBLS +#define AE_COMPILE_SPLINE1D +#endif + +#ifdef AE_COMPILE_RBFV1 +#define AE_PARTIAL_BUILD +#define 
AE_COMPILE_SCODES +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_NEARESTNEIGHBOR +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_INTFITSERV +#define AE_COMPILE_RATINT +#define AE_COMPILE_POLINT +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_SPLINE1D +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_LINMIN +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_QQPSOLVER +#define AE_COMPILE_QPDENSEAULSOLVER +#define AE_COMPILE_MINBLEIC +#define AE_COMPILE_QPBLEICSOLVER +#define AE_COMPILE_VIPMSOLVER +#define AE_COMPILE_MINQP +#define AE_COMPILE_MINLM +#define AE_COMPILE_LSFIT +#endif + +#ifdef AE_COMPILE_RBF +#define AE_PARTIAL_BUILD +#define AE_COMPILE_SCODES +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_NEARESTNEIGHBOR +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_INTFITSERV +#define AE_COMPILE_RATINT +#define AE_COMPILE_POLINT +#define AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_SPLINE1D +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_LINMIN +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_QQPSOLVER +#define AE_COMPILE_QPDENSEAULSOLVER +#define AE_COMPILE_MINBLEIC +#define AE_COMPILE_QPBLEICSOLVER +#define AE_COMPILE_VIPMSOLVER +#define AE_COMPILE_MINQP +#define AE_COMPILE_MINLM +#define AE_COMPILE_LSFIT +#define AE_COMPILE_RBFV1 +#define AE_COMPILE_RBFV2 +#endif + +#ifdef AE_COMPILE_INTCOMP +#define AE_PARTIAL_BUILD +#define AE_COMPILE_LINMIN +#define AE_COMPILE_APSERV +#define AE_COMPILE_TSORT +#define AE_COMPILE_OPTGUARDAPI +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_HQRND +#define AE_COMPILE_MATGEN +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define 
AE_COMPILE_HBLAS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_BDSVD +#define AE_COMPILE_SVD +#define AE_COMPILE_OPTSERV +#define AE_COMPILE_FBLS +#define AE_COMPILE_CQMODELS +#define AE_COMPILE_SNNLS +#define AE_COMPILE_SACTIVESETS +#define AE_COMPILE_MINBLEIC +#define AE_COMPILE_XBLAS +#define AE_COMPILE_DIRECTDENSESOLVERS +#define AE_COMPILE_NORMESTIMATOR +#define AE_COMPILE_LINLSQR +#define AE_COMPILE_MINLBFGS +#define AE_COMPILE_LPQPSERV +#define AE_COMPILE_QQPSOLVER +#define AE_COMPILE_QPDENSEAULSOLVER +#define AE_COMPILE_QPBLEICSOLVER +#define AE_COMPILE_VIPMSOLVER +#define AE_COMPILE_MINQP +#define AE_COMPILE_MINLM +#define AE_COMPILE_REVISEDDUALSIMPLEX +#define AE_COMPILE_NLCSLP +#define AE_COMPILE_NLCSQP +#define AE_COMPILE_MINNLC +#define AE_COMPILE_FITSPHERE +#define AE_COMPILE_INTFITSERV +#define AE_COMPILE_SPLINE1D +#endif + +#ifdef AE_COMPILE_ELLIPTIC +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_HERMITE +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_DAWSON +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_TRIGINTEGRALS +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_POISSONDISTR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IGAMMAF +#endif + +#ifdef AE_COMPILE_BESSEL +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_IBETAF +#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#endif + +#ifdef AE_COMPILE_FDISTR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IBETAF +#endif + +#ifdef AE_COMPILE_FRESNEL +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_JACOBIANELLIPTIC +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_PSIF +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_EXPINTEGRALS +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_LAGUERRE +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_CHISQUAREDISTR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IGAMMAF +#endif + +#ifdef AE_COMPILE_LEGENDRE +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_BETAF +#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#endif + +#ifdef AE_COMPILE_CHEBYSHEV +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_STUDENTTDISTR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IBETAF +#endif + +#ifdef AE_COMPILE_NEARUNITYUNIT +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_BINOMIALDISTR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IBETAF +#define AE_COMPILE_NEARUNITYUNIT +#endif + +#ifdef AE_COMPILE_AIRYF +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_WSR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#endif + +#ifdef AE_COMPILE_STEST +#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IBETAF +#define AE_COMPILE_NEARUNITYUNIT +#define AE_COMPILE_BINOMIALDISTR +#endif + +#ifdef AE_COMPILE_CORRELATIONTESTS 
+#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IBETAF +#define AE_COMPILE_STUDENTTDISTR +#define AE_COMPILE_TSORT +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_BASESTAT +#endif + +#ifdef AE_COMPILE_STUDENTTTESTS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IBETAF +#define AE_COMPILE_STUDENTTDISTR +#endif + +#ifdef AE_COMPILE_MANNWHITNEYU +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#endif + +#ifdef AE_COMPILE_JARQUEBERA +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_VARIANCETESTS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_GAMMAFUNC +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_NORMALDISTR +#define AE_COMPILE_IBETAF +#define AE_COMPILE_FDISTR +#define AE_COMPILE_IGAMMAF +#define AE_COMPILE_CHISQUAREDISTR +#endif + +#ifdef AE_COMPILE_SCHUR +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_HSSCHUR +#endif + +#ifdef AE_COMPILE_SPDGEVD +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#define AE_COMPILE_SBLAS +#define AE_COMPILE_BLAS +#define AE_COMPILE_TRLINSOLVE +#define AE_COMPILE_SAFESOLVE +#define AE_COMPILE_RCOND +#define AE_COMPILE_MATINV +#define AE_COMPILE_HBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_HSSCHUR +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_EVD +#endif + +#ifdef AE_COMPILE_INVERSEUPDATE +#define AE_PARTIAL_BUILD +#endif + +#ifdef AE_COMPILE_MATDET +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#endif + +#ifdef AE_COMPILE_POLYNOMIALSOLVER +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_HQRND +#define AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_MATGEN +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_BLAS +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_HSSCHUR +#define AE_COMPILE_BASICSTATOPS +#define AE_COMPILE_EVD +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_TRFAC +#endif + +#ifdef AE_COMPILE_NLEQ +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_LINMIN +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_ABLAS +#define AE_COMPILE_HQRND +#define 
AE_COMPILE_HBLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_SBLAS +#define AE_COMPILE_ORTFAC +#define AE_COMPILE_FBLS +#endif + +#ifdef AE_COMPILE_DIRECTSPARSESOLVERS +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#define AE_COMPILE_DLU +#define AE_COMPILE_SPTRF +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#define AE_COMPILE_ROTATIONS +#define AE_COMPILE_TRFAC +#endif + +#ifdef AE_COMPILE_LINCG +#define AE_PARTIAL_BUILD +#define AE_COMPILE_APSERV +#define AE_COMPILE_ABLASMKL +#define AE_COMPILE_HQRND +#define AE_COMPILE_TSORT +#define AE_COMPILE_SPARSE +#define AE_COMPILE_ABLASF +#define AE_COMPILE_ABLAS +#define AE_COMPILE_CREFLECTIONS +#define AE_COMPILE_MATGEN +#endif + +#ifdef AE_COMPILE_ALGLIBBASICS +#define AE_PARTIAL_BUILD +#endif + + + #endif diff -Nru alglib-3.10.0/src/dataanalysis.cpp alglib-3.16.0/src/dataanalysis.cpp --- alglib-3.10.0/src/dataanalysis.cpp 2015-08-19 12:24:22.000000000 +0000 +++ alglib-3.16.0/src/dataanalysis.cpp 2019-12-19 10:28:27.000000000 +0000 @@ -1,5 +1,5 @@ /************************************************************************* -ALGLIB 3.10.0 (source code generated 2015-08-19) +ALGLIB 3.16.0 (source code generated 2019-12-19) Copyright (c) Sergey Bochkanov (ALGLIB project). >>> SOURCE LICENSE >>> @@ -17,17 +17,20 @@ http://www.fsf.org/licensing/licenses >>> END OF LICENSE >>> *************************************************************************/ +#ifdef _MSC_VER +#define _CRT_SECURE_NO_WARNINGS +#endif #include "stdafx.h" #include "dataanalysis.h" // disable some irrelevant warnings -#if (AE_COMPILER==AE_MSVC) +#if (AE_COMPILER==AE_MSVC) && !defined(AE_ALL_WARNINGS) #pragma warning(disable:4100) #pragma warning(disable:4127) +#pragma warning(disable:4611) #pragma warning(disable:4702) #pragma warning(disable:4996) #endif -using namespace std; ///////////////////////////////////////////////////////////////////////// // @@ -37,7 +40,306 @@ namespace alglib { +#if defined(AE_COMPILE_PCA) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_BDSS) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_MLPBASE) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_LDA) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_SSA) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_LINREG) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_FILTERS) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_LOGIT) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_MCPD) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_MLPE) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_MLPTRAIN) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_CLUSTERING) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_DFOREST) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_KNN) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_DATACOMP) || !defined(AE_PARTIAL_BUILD) + +#endif + +#if defined(AE_COMPILE_PCA) || !defined(AE_PARTIAL_BUILD) +/************************************************************************* +Principal components analysis + +This function builds orthogonal basis where first axis corresponds to +direction with maximum variance, second axis maximizes variance in the +subspace 
orthogonal to first axis and so on. + +This function builds FULL basis, i.e. returns N vectors corresponding to +ALL directions, no matter how informative. If you need just a few (say, +10 or 50) of the most important directions, you may find it faster to use +one of the reduced versions: +* pcatruncatedsubspace() - for subspace iteration based method + +It should be noted that, unlike LDA, PCA does not use class labels. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + X - dataset, array[0..NPoints-1,0..NVars-1]. + matrix contains ONLY INDEPENDENT VARIABLES. + NPoints - dataset size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + +OUTPUT PARAMETERS: + Info - return code: + * -4, if SVD subroutine haven't converged + * -1, if wrong parameters has been passed (NPoints<0, + NVars<1) + * 1, if task is solved + S2 - array[0..NVars-1]. variance values corresponding + to basis vectors. + V - array[0..NVars-1,0..NVars-1] + matrix, whose columns store basis vectors. + + -- ALGLIB -- + Copyright 25.08.2008 by Bochkanov Sergey +*************************************************************************/ +void pcabuildbasis(const real_2d_array &x, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, real_1d_array &s2, real_2d_array &v, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::pcabuildbasis(const_cast(x.c_ptr()), npoints, nvars, &info, const_cast(s2.c_ptr()), const_cast(v.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +Principal components analysis + +This function performs truncated PCA, i.e. returns just a few most important +directions. + +Internally it uses iterative eigensolver which is very efficient when only +a minor fraction of full basis is required. Thus, if you need full basis, +it is better to use pcabuildbasis() function. + +It should be noted that, unlike LDA, PCA does not use class labels. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! 
related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + X - dataset, array[0..NPoints-1,0..NVars-1]. + matrix contains ONLY INDEPENDENT VARIABLES. + NPoints - dataset size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + NNeeded - number of requested components, in [1,NVars] range; + this function is efficient only for NNeeded<(x.c_ptr()), npoints, nvars, nneeded, eps, maxits, const_cast(s2.c_ptr()), const_cast(v.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +Sparse truncated principal components analysis + +This function performs sparse truncated PCA, i.e. returns just a few most +important principal components for a sparse input X. + +Internally it uses iterative eigensolver which is very efficient when only +a minor fraction of full basis is required. + +It should be noted that, unlike LDA, PCA does not use class labels. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + X - sparse dataset, sparse npoints*nvars matrix. It is + recommended to use CRS sparse storage format; non-CRS + input will be internally converted to CRS. + Matrix contains ONLY INDEPENDENT VARIABLES, and must + be EXACTLY npoints*nvars. 
+ NPoints - dataset size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + NNeeded - number of requested components, in [1,NVars] range; + this function is efficient only for NNeeded<(x.c_ptr()), npoints, nvars, nneeded, eps, maxits, const_cast(s2.c_ptr()), const_cast(v.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} +#endif + +#if defined(AE_COMPILE_BDSS) || !defined(AE_PARTIAL_BUILD) /************************************************************************* Optimal binary classification @@ -65,20 +367,26 @@ -- ALGLIB -- Copyright 22.05.2008 by Bochkanov Sergey *************************************************************************/ -void dsoptimalsplit2(const real_1d_array &a, const integer_1d_array &c, const ae_int_t n, ae_int_t &info, double &threshold, double &pal, double &pbl, double &par, double &pbr, double &cve) +void dsoptimalsplit2(const real_1d_array &a, const integer_1d_array &c, const ae_int_t n, ae_int_t &info, double &threshold, double &pal, double &pbl, double &par, double &pbr, double &cve, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::dsoptimalsplit2(const_cast(a.c_ptr()), const_cast(c.c_ptr()), n, &info, &threshold, &pal, &pbl, &par, &pbr, &cve, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dsoptimalsplit2(const_cast(a.c_ptr()), const_cast(c.c_ptr()), n, &info, &threshold, &pal, &pbl, &par, &pbr, &cve, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -105,6418 +413,7101 @@ -- ALGLIB -- Copyright 11.12.2008 by Bochkanov Sergey *************************************************************************/ -void dsoptimalsplit2fast(real_1d_array &a, integer_1d_array &c, integer_1d_array &tiesbuf, integer_1d_array &cntbuf, real_1d_array &bufr, integer_1d_array &bufi, const ae_int_t n, const ae_int_t nc, const double alpha, ae_int_t &info, double &threshold, double &rms, double &cvrms) +void dsoptimalsplit2fast(real_1d_array &a, integer_1d_array &c, integer_1d_array &tiesbuf, integer_1d_array &cntbuf, real_1d_array &bufr, integer_1d_array &bufi, const ae_int_t n, const ae_int_t nc, const double alpha, ae_int_t &info, double &threshold, double &rms, double &cvrms, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::dsoptimalsplit2fast(const_cast(a.c_ptr()), const_cast(c.c_ptr()), const_cast(tiesbuf.c_ptr()), const_cast(cntbuf.c_ptr()), const_cast(bufr.c_ptr()), const_cast(bufi.c_ptr()), n, nc, alpha, &info, &threshold, &rms, &cvrms, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - 
catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dsoptimalsplit2fast(const_cast(a.c_ptr()), const_cast(c.c_ptr()), const_cast(tiesbuf.c_ptr()), const_cast(cntbuf.c_ptr()), const_cast(bufr.c_ptr()), const_cast(bufi.c_ptr()), n, nc, alpha, &info, &threshold, &rms, &cvrms, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif +#if defined(AE_COMPILE_MLPBASE) || !defined(AE_PARTIAL_BUILD) /************************************************************************* -This structure is a clusterization engine. +Model's errors: + * RelCLSError - fraction of misclassified cases. + * AvgCE - acerage cross-entropy + * RMSError - root-mean-square error + * AvgError - average error + * AvgRelError - average relative error -You should not try to access its fields directly. -Use ALGLIB functions in order to work with this object. +NOTE 1: RelCLSError/AvgCE are zero on regression problems. - -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey +NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain + errors in prediction of posterior probabilities *************************************************************************/ -_clusterizerstate_owner::_clusterizerstate_owner() +_modelerrors_owner::_modelerrors_owner() { - p_struct = (alglib_impl::clusterizerstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::clusterizerstate), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_clusterizerstate_init(p_struct, NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_modelerrors_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::modelerrors*)alglib_impl::ae_malloc(sizeof(alglib_impl::modelerrors), &_state); + memset(p_struct, 0, sizeof(alglib_impl::modelerrors)); + alglib_impl::_modelerrors_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } -_clusterizerstate_owner::_clusterizerstate_owner(const _clusterizerstate_owner &rhs) +_modelerrors_owner::_modelerrors_owner(const _modelerrors_owner &rhs) { - p_struct = (alglib_impl::clusterizerstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::clusterizerstate), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_clusterizerstate_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_modelerrors_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: modelerrors copy constructor failure (source is not initialized)", &_state); + p_struct = 
(alglib_impl::modelerrors*)alglib_impl::ae_malloc(sizeof(alglib_impl::modelerrors), &_state); + memset(p_struct, 0, sizeof(alglib_impl::modelerrors)); + alglib_impl::_modelerrors_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } -_clusterizerstate_owner& _clusterizerstate_owner::operator=(const _clusterizerstate_owner &rhs) +_modelerrors_owner& _modelerrors_owner::operator=(const _modelerrors_owner &rhs) { if( this==&rhs ) return *this; - alglib_impl::_clusterizerstate_clear(p_struct); - alglib_impl::_clusterizerstate_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: modelerrors assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: modelerrors assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_modelerrors_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::modelerrors)); + alglib_impl::_modelerrors_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); return *this; } -_clusterizerstate_owner::~_clusterizerstate_owner() +_modelerrors_owner::~_modelerrors_owner() { - alglib_impl::_clusterizerstate_clear(p_struct); - ae_free(p_struct); + if( p_struct!=NULL ) + { + alglib_impl::_modelerrors_destroy(p_struct); + ae_free(p_struct); + } } -alglib_impl::clusterizerstate* _clusterizerstate_owner::c_ptr() +alglib_impl::modelerrors* _modelerrors_owner::c_ptr() { return p_struct; } -alglib_impl::clusterizerstate* _clusterizerstate_owner::c_ptr() const +alglib_impl::modelerrors* _modelerrors_owner::c_ptr() const { - return const_cast(p_struct); + return const_cast(p_struct); } -clusterizerstate::clusterizerstate() : _clusterizerstate_owner() +modelerrors::modelerrors() : _modelerrors_owner() ,relclserror(p_struct->relclserror),avgce(p_struct->avgce),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror) { } -clusterizerstate::clusterizerstate(const clusterizerstate &rhs):_clusterizerstate_owner(rhs) +modelerrors::modelerrors(const modelerrors &rhs):_modelerrors_owner(rhs) ,relclserror(p_struct->relclserror),avgce(p_struct->avgce),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror) { } -clusterizerstate& clusterizerstate::operator=(const clusterizerstate &rhs) +modelerrors& modelerrors::operator=(const modelerrors &rhs) { if( this==&rhs ) return *this; - _clusterizerstate_owner::operator=(rhs); + _modelerrors_owner::operator=(rhs); return *this; } -clusterizerstate::~clusterizerstate() +modelerrors::~modelerrors() { } /************************************************************************* -This structure is used to store results of the agglomerative hierarchical -clustering (AHC). - -Following information is returned: - -* TerminationType - completion code: - * 1 for successful completion of algorithm - * -5 inappropriate combination of clustering algorithm and distance - function was used. As for now, it is possible only when Ward's - method is called for dataset with non-Euclidean distance function. 
- In case negative completion code is returned, other fields of report - structure are invalid and should not be used. - -* NPoints contains number of points in the original dataset - -* Z contains information about merges performed (see below). Z contains - indexes from the original (unsorted) dataset and it can be used when you - need to know what points were merged. However, it is not convenient when - you want to build a dendrograd (see below). - -* if you want to build dendrogram, you can use Z, but it is not good - option, because Z contains indexes from unsorted dataset. Dendrogram - built from such dataset is likely to have intersections. So, you have to - reorder you points before building dendrogram. - Permutation which reorders point is returned in P. Another representation - of merges, which is more convenient for dendorgram construction, is - returned in PM. - -* more information on format of Z, P and PM can be found below and in the - examples from ALGLIB Reference Manual. - -FORMAL DESCRIPTION OF FIELDS: - NPoints number of points - Z array[NPoints-1,2], contains indexes of clusters - linked in pairs to form clustering tree. I-th row - corresponds to I-th merge: - * Z[I,0] - index of the first cluster to merge - * Z[I,1] - index of the second cluster to merge - * Z[I,0](rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_multilayerperceptron_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: multilayerperceptron copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::multilayerperceptron*)alglib_impl::ae_malloc(sizeof(alglib_impl::multilayerperceptron), &_state); + memset(p_struct, 0, sizeof(alglib_impl::multilayerperceptron)); + alglib_impl::_multilayerperceptron_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } -_ahcreport_owner& _ahcreport_owner::operator=(const _ahcreport_owner &rhs) +_multilayerperceptron_owner& _multilayerperceptron_owner::operator=(const _multilayerperceptron_owner &rhs) { if( this==&rhs ) return *this; - alglib_impl::_ahcreport_clear(p_struct); - alglib_impl::_ahcreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: multilayerperceptron assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: multilayerperceptron assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_multilayerperceptron_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::multilayerperceptron)); + alglib_impl::_multilayerperceptron_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); return *this; } 
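The hunks in this file swap the old try/catch wrappers, which rethrew alglib_impl failures as ap_error, for setjmp()-based error handling and append a trailing xparams argument to every public function. For client code the change is meant to be transparent; the sketch below is only an illustration of how a caller might exercise the converted pcabuildbasis() wrapper. It assumes that the 3.16 public headers give _xparams a default value (alglib::xdefault is assumed here) and that the library is built with exceptions enabled, so that _ALGLIB_CPP_EXCEPTION still surfaces failures as alglib::ap_error.

#include <iostream>
#include "dataanalysis.h"

int main()
{
    try
    {
        // Small 4x2 dataset; pcabuildbasis() is one of the wrappers converted in this diff.
        alglib::real_2d_array x("[[2.5,2.4],[0.5,0.7],[2.2,2.9],[1.9,2.2]]");
        alglib::ae_int_t info;
        alglib::real_1d_array s2;
        alglib::real_2d_array v;
        // Same call shape as in 3.10; the new trailing xparams argument is
        // left at its (assumed) header default, alglib::xdefault.
        alglib::pcabuildbasis(x, 4, 2, info, s2, v);
        std::cout << "info=" << info << ", s2[0]=" << s2[0] << ", s2[1]=" << s2[1] << "\n";
    }
    catch(alglib::ap_error &e)
    {
        // With exceptions enabled, the new setjmp/_break_jump machinery still
        // reports failures to C++ callers through ap_error.
        std::cerr << "ALGLIB error: " << e.msg << "\n";
    }
    return 0;
}

When the library is instead compiled with AE_NO_EXCEPTIONS, the #else branches added above show the wrappers returning silently after recording the message via _ALGLIB_SET_ERROR_FLAG, so a caller built that way has to check the error flag rather than catch anything.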
-_ahcreport_owner::~_ahcreport_owner() +_multilayerperceptron_owner::~_multilayerperceptron_owner() { - alglib_impl::_ahcreport_clear(p_struct); - ae_free(p_struct); + if( p_struct!=NULL ) + { + alglib_impl::_multilayerperceptron_destroy(p_struct); + ae_free(p_struct); + } } -alglib_impl::ahcreport* _ahcreport_owner::c_ptr() +alglib_impl::multilayerperceptron* _multilayerperceptron_owner::c_ptr() { return p_struct; } -alglib_impl::ahcreport* _ahcreport_owner::c_ptr() const +alglib_impl::multilayerperceptron* _multilayerperceptron_owner::c_ptr() const { - return const_cast(p_struct); + return const_cast(p_struct); } -ahcreport::ahcreport() : _ahcreport_owner() ,terminationtype(p_struct->terminationtype),npoints(p_struct->npoints),p(&p_struct->p),z(&p_struct->z),pz(&p_struct->pz),pm(&p_struct->pm),mergedist(&p_struct->mergedist) +multilayerperceptron::multilayerperceptron() : _multilayerperceptron_owner() { } -ahcreport::ahcreport(const ahcreport &rhs):_ahcreport_owner(rhs) ,terminationtype(p_struct->terminationtype),npoints(p_struct->npoints),p(&p_struct->p),z(&p_struct->z),pz(&p_struct->pz),pm(&p_struct->pm),mergedist(&p_struct->mergedist) +multilayerperceptron::multilayerperceptron(const multilayerperceptron &rhs):_multilayerperceptron_owner(rhs) { } -ahcreport& ahcreport::operator=(const ahcreport &rhs) +multilayerperceptron& multilayerperceptron::operator=(const multilayerperceptron &rhs) { if( this==&rhs ) return *this; - _ahcreport_owner::operator=(rhs); + _multilayerperceptron_owner::operator=(rhs); return *this; } -ahcreport::~ahcreport() +multilayerperceptron::~multilayerperceptron() { } /************************************************************************* -This structure is used to store results of the k-means clustering -algorithm. - -Following information is always returned: -* NPoints contains number of points in the original dataset -* TerminationType contains completion code, negative on failure, positive - on success -* K contains number of clusters - -For positive TerminationType we return: -* NFeatures contains number of variables in the original dataset -* C, which contains centers found by algorithm -* CIdx, which maps points of the original dataset to clusters - -FORMAL DESCRIPTION OF FIELDS: - NPoints number of points, >=0 - NFeatures number of variables, >=1 - TerminationType completion code: - * -5 if distance type is anything different from - Euclidean metric - * -3 for degenerate dataset: a) less than K distinct - points, b) K=0 for non-empty dataset. - * +1 for successful completion - K number of clusters - C array[K,NFeatures], rows of the array store centers - CIdx array[NPoints], which contains cluster indexes - IterationsCount actual number of iterations performed by clusterizer. - If algorithm performed more than one random restart, - total number of iterations is returned. - Energy merit function, "energy", sum of squared deviations - from cluster centers +This function serializes data structure to string. - -- ALGLIB -- - Copyright 27.11.2012 by Bochkanov Sergey +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. 
+ But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. *************************************************************************/ -_kmeansreport_owner::_kmeansreport_owner() +void mlpserialize(multilayerperceptron &obj, std::string &s_out) { - p_struct = (alglib_impl::kmeansreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::kmeansreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_kmeansreport_init(p_struct, NULL); -} + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + alglib_impl::ae_int_t ssize; -_kmeansreport_owner::_kmeansreport_owner(const _kmeansreport_owner &rhs) -{ - p_struct = (alglib_impl::kmeansreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::kmeansreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_kmeansreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_alloc_start(&serializer); + alglib_impl::mlpalloc(&serializer, obj.c_ptr(), &state); + ssize = alglib_impl::ae_serializer_get_alloc_size(&serializer); + s_out.clear(); + s_out.reserve((size_t)(ssize+1)); + alglib_impl::ae_serializer_sstart_str(&serializer, &s_out); + alglib_impl::mlpserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_assert( s_out.length()<=(size_t)ssize, "ALGLIB: serialization integrity error", &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } - -_kmeansreport_owner& _kmeansreport_owner::operator=(const _kmeansreport_owner &rhs) +/************************************************************************* +This function unserializes data structure from string. 
+*************************************************************************/ +void mlpunserialize(const std::string &s_in, multilayerperceptron &obj) { - if( this==&rhs ) - return *this; - alglib_impl::_kmeansreport_clear(p_struct); - alglib_impl::_kmeansreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); - return *this; -} + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; -_kmeansreport_owner::~_kmeansreport_owner() -{ - alglib_impl::_kmeansreport_clear(p_struct); - ae_free(p_struct); + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_ustart_str(&serializer, &s_in); + alglib_impl::mlpunserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } -alglib_impl::kmeansreport* _kmeansreport_owner::c_ptr() -{ - return p_struct; -} -alglib_impl::kmeansreport* _kmeansreport_owner::c_ptr() const +/************************************************************************* +This function serializes data structure to C++ stream. + +Data stream generated by this function is same as string representation +generated by string version of serializer - alphanumeric characters, +dots, underscores, minus signs, which are grouped into words separated by +spaces and CR+LF. + +We recommend you to read comments on string version of serializer to find +out more about serialization of AlGLIB objects. +*************************************************************************/ +void mlpserialize(multilayerperceptron &obj, std::ostream &s_out) { - return const_cast(p_struct); + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_alloc_start(&serializer); + alglib_impl::mlpalloc(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_get_alloc_size(&serializer); // not actually needed, but we have to ask + alglib_impl::ae_serializer_sstart_stream(&serializer, &s_out); + alglib_impl::mlpserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } -kmeansreport::kmeansreport() : _kmeansreport_owner() ,npoints(p_struct->npoints),nfeatures(p_struct->nfeatures),terminationtype(p_struct->terminationtype),iterationscount(p_struct->iterationscount),energy(p_struct->energy),k(p_struct->k),c(&p_struct->c),cidx(&p_struct->cidx) +/************************************************************************* +This function unserializes data structure from stream. 
+*************************************************************************/ +void mlpunserialize(const std::istream &s_in, multilayerperceptron &obj) { -} + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; -kmeansreport::kmeansreport(const kmeansreport &rhs):_kmeansreport_owner(rhs) ,npoints(p_struct->npoints),nfeatures(p_struct->nfeatures),terminationtype(p_struct->terminationtype),iterationscount(p_struct->iterationscount),energy(p_struct->energy),k(p_struct->k),c(&p_struct->c),cidx(&p_struct->cidx) -{ + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_ustart_stream(&serializer, &s_in); + alglib_impl::mlpunserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } -kmeansreport& kmeansreport::operator=(const kmeansreport &rhs) -{ - if( this==&rhs ) - return *this; - _kmeansreport_owner::operator=(rhs); - return *this; -} +/************************************************************************* +Creates neural network with NIn inputs, NOut outputs, without hidden +layers, with linear output layer. Network weights are filled with small +random values. -kmeansreport::~kmeansreport() + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +void mlpcreate0(const ae_int_t nin, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreate0(nin, nout, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function initializes clusterizer object. Newly initialized object is -empty, i.e. it does not contain dataset. You should use it as follows: -1. creation -2. dataset is added with ClusterizerSetPoints() -3. additional parameters are set -3. clusterization is performed with one of the clustering functions +Same as MLPCreate0, but with one hidden layer (NHid neurons) with +non-linear activation function. Output layer is linear. 
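For reference, a minimal round-trip sketch of the string serializer/unserializer wrappers added above (mlpserialize/mlpunserialize); this is illustrative only and not part of the patch. The header name (dataanalysis.h) and the network geometry are assumptions; the stream overloads shown in the surrounding hunks are used the same way.

    #include <string>
    #include <cstdio>
    #include "dataanalysis.h"   // assumed header providing the MLP wrappers

    int main()
    {
        // Small 2-input, 1-output network with one hidden layer.
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(2, 5, 1, net);

        // Serialize to a portable ASCII string (alphanumerics, dots,
        // underscores, minus signs, separated by spaces/CR+LF).
        std::string s;
        alglib::mlpserialize(net, s);

        // Restore an independent copy from the string.
        alglib::multilayerperceptron restored;
        alglib::mlpunserialize(s, restored);

        std::printf("weights: %d\n", (int)alglib::mlpgetweightscount(restored));
        return 0;
    }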
-- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -void clusterizercreate(clusterizerstate &s) +void mlpcreate1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizercreate(const_cast(s.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreate1(nin, nhid, nout, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function adds dataset to the clusterizer structure. - -This function overrides all previous calls of ClusterizerSetPoints() or -ClusterizerSetDistances(). - -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - XY - array[NPoints,NFeatures], dataset - NPoints - number of points, >=0 - NFeatures- number of features, >=1 - DistType- distance function: - * 0 Chebyshev distance (L-inf norm) - * 1 city block distance (L1 norm) - * 2 Euclidean distance (L2 norm), non-squared - * 10 Pearson correlation: - dist(a,b) = 1-corr(a,b) - * 11 Absolute Pearson correlation: - dist(a,b) = 1-|corr(a,b)| - * 12 Uncentered Pearson correlation (cosine of the angle): - dist(a,b) = a'*b/(|a|*|b|) - * 13 Absolute uncentered Pearson correlation - dist(a,b) = |a'*b|/(|a|*|b|) - * 20 Spearman rank correlation: - dist(a,b) = 1-rankcorr(a,b) - * 21 Absolute Spearman rank correlation - dist(a,b) = 1-|rankcorr(a,b)| - -NOTE 1: different distance functions have different performance penalty: - * Euclidean or Pearson correlation distances are the fastest ones - * Spearman correlation distance function is a bit slower - * city block and Chebyshev distances are order of magnitude slower - - The reason behing difference in performance is that correlation-based - distance functions are computed using optimized linear algebra kernels, - while Chebyshev and city block distance functions are computed using - simple nested loops with two branches at each iteration. - -NOTE 2: different clustering algorithms have different limitations: - * agglomerative hierarchical clustering algorithms may be used with - any kind of distance metric - * k-means++ clustering algorithm may be used only with Euclidean - distance function - Thus, list of specific clustering algorithms you may use depends - on distance function you specify when you set your dataset. +Same as MLPCreate0, but with two hidden layers (NHid1 and NHid2 neurons) +with non-linear activation function. Output layer is linear. 
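The hunks above replace the old try/catch wrappers with a setjmp-based _break_jump plus an AE_NO_EXCEPTIONS switch. In the default exception-enabled build, core failures are still delivered to the caller as alglib::ap_error; in AE_NO_EXCEPTIONS builds the wrappers set an error flag and return instead, as the #else branches show. A caller-side sketch for the default build follows; the header name and the deliberately invalid call are assumptions.

    #include <iostream>
    #include "dataanalysis.h"   // assumed header name

    int main()
    {
        alglib::multilayerperceptron net;
        try
        {
            // Classifier constructors require at least two classes, so this
            // call should be rejected by the core and surface as ap_error.
            alglib::mlpcreatec0(2, 1, net);
        }
        catch(alglib::ap_error &e)
        {
            // e.msg carries the message set by the core.
            std::cerr << "ALGLIB error: " << e.msg << std::endl;
        }
        return 0;
    }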
+ $ALL -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -void clusterizersetpoints(const clusterizerstate &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype) +void mlpcreate2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizersetpoints(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, nfeatures, disttype, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreate2(nin, nhid1, nhid2, nout, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function adds dataset to the clusterizer structure. +Creates neural network with NIn inputs, NOut outputs, without hidden +layers with non-linear output layer. Network weights are filled with small +random values. -This function overrides all previous calls of ClusterizerSetPoints() or -ClusterizerSetDistances(). +Activation function of the output layer takes values: -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - XY - array[NPoints,NFeatures], dataset - NPoints - number of points, >=0 - NFeatures- number of features, >=1 - DistType- distance function: - * 0 Chebyshev distance (L-inf norm) - * 1 city block distance (L1 norm) - * 2 Euclidean distance (L2 norm), non-squared - * 10 Pearson correlation: - dist(a,b) = 1-corr(a,b) - * 11 Absolute Pearson correlation: - dist(a,b) = 1-|corr(a,b)| - * 12 Uncentered Pearson correlation (cosine of the angle): - dist(a,b) = a'*b/(|a|*|b|) - * 13 Absolute uncentered Pearson correlation - dist(a,b) = |a'*b|/(|a|*|b|) - * 20 Spearman rank correlation: - dist(a,b) = 1-rankcorr(a,b) - * 21 Absolute Spearman rank correlation - dist(a,b) = 1-|rankcorr(a,b)| + (B, +INF), if D>=0 -NOTE 1: different distance functions have different performance penalty: - * Euclidean or Pearson correlation distances are the fastest ones - * Spearman correlation distance function is a bit slower - * city block and Chebyshev distances are order of magnitude slower +or - The reason behing difference in performance is that correlation-based - distance functions are computed using optimized linear algebra kernels, - while Chebyshev and city block distance functions are computed using - simple nested loops with two branches at each iteration. + (-INF, B), if D<0. 
-NOTE 2: different clustering algorithms have different limitations: - * agglomerative hierarchical clustering algorithms may be used with - any kind of distance metric - * k-means++ clustering algorithm may be used only with Euclidean - distance function - Thus, list of specific clustering algorithms you may use depends - on distance function you specify when you set your dataset. -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 30.03.2008 by Bochkanov Sergey *************************************************************************/ -void clusterizersetpoints(const clusterizerstate &s, const real_2d_array &xy, const ae_int_t disttype) +void mlpcreateb0(const ae_int_t nin, const ae_int_t nout, const double b, const double d, multilayerperceptron &network, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; - ae_int_t npoints; - ae_int_t nfeatures; - - npoints = xy.rows(); - nfeatures = xy.cols(); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizersetpoints(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, nfeatures, disttype, &_alglib_env_state); - - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreateb0(nin, nout, b, d, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function adds dataset given by distance matrix to the clusterizer -structure. It is important that dataset is not given explicitly - only -distance matrix is given. - -This function overrides all previous calls of ClusterizerSetPoints() or -ClusterizerSetDistances(). - -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - D - array[NPoints,NPoints], distance matrix given by its upper - or lower triangle (main diagonal is ignored because its - entries are expected to be zero). - NPoints - number of points - IsUpper - whether upper or lower triangle of D is given. - -NOTE 1: different clustering algorithms have different limitations: - * agglomerative hierarchical clustering algorithms may be used with - any kind of distance metric, including one which is given by - distance matrix - * k-means++ clustering algorithm may be used only with Euclidean - distance function and explicitly given points - it can not be - used with dataset given by distance matrix - Thus, if you call this function, you will be unable to use k-means - clustering algorithm to process your problem. +Same as MLPCreateB0 but with non-linear hidden layer. 
-- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 30.03.2008 by Bochkanov Sergey *************************************************************************/ -void clusterizersetdistances(const clusterizerstate &s, const real_2d_array &d, const ae_int_t npoints, const bool isupper) +void mlpcreateb1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double b, const double d, multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizersetdistances(const_cast(s.c_ptr()), const_cast(d.c_ptr()), npoints, isupper, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreateb1(nin, nhid, nout, b, d, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function adds dataset given by distance matrix to the clusterizer -structure. It is important that dataset is not given explicitly - only -distance matrix is given. - -This function overrides all previous calls of ClusterizerSetPoints() or -ClusterizerSetDistances(). - -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - D - array[NPoints,NPoints], distance matrix given by its upper - or lower triangle (main diagonal is ignored because its - entries are expected to be zero). - NPoints - number of points - IsUpper - whether upper or lower triangle of D is given. - -NOTE 1: different clustering algorithms have different limitations: - * agglomerative hierarchical clustering algorithms may be used with - any kind of distance metric, including one which is given by - distance matrix - * k-means++ clustering algorithm may be used only with Euclidean - distance function and explicitly given points - it can not be - used with dataset given by distance matrix - Thus, if you call this function, you will be unable to use k-means - clustering algorithm to process your problem. +Same as MLPCreateB0 but with two non-linear hidden layers. 
-- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 30.03.2008 by Bochkanov Sergey *************************************************************************/ -void clusterizersetdistances(const clusterizerstate &s, const real_2d_array &d, const bool isupper) +void mlpcreateb2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double b, const double d, multilayerperceptron &network, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; - ae_int_t npoints; - if( (d.rows()!=d.cols())) - throw ap_error("Error while calling 'clusterizersetdistances': looks like one of arguments has wrong size"); - npoints = d.rows(); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizersetdistances(const_cast(s.c_ptr()), const_cast(d.c_ptr()), npoints, isupper, &_alglib_env_state); - - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreateb2(nin, nhid1, nhid2, nout, b, d, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function sets agglomerative hierarchical clustering algorithm - -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - Algo - algorithm type: - * 0 complete linkage (default algorithm) - * 1 single linkage - * 2 unweighted average linkage - * 3 weighted average linkage - * 4 Ward's method - -NOTE: Ward's method works correctly only with Euclidean distance, that's - why algorithm will return negative termination code (failure) for - any other distance type. - - It is possible, however, to use this method with user-supplied - distance matrix. It is your responsibility to pass one which was - calculated with Euclidean distance function. +Creates neural network with NIn inputs, NOut outputs, without hidden +layers with non-linear output layer. Network weights are filled with small +random values. Activation function of the output layer takes values [A,B]. 
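A short sketch contrasting the bounded-output constructors documented here: mlpcreateb0 gives a half-bounded output range selected by B and D, while mlpcreater1 constrains outputs to [A,B]. Illustrative only; header name and geometries are assumptions.

    #include <cstdio>
    #include "dataanalysis.h"   // assumed header name

    int main()
    {
        // Outputs bounded below by 0.0 (D>=0 selects the (B,+INF) branch).
        alglib::multilayerperceptron positive;
        alglib::mlpcreateb0(3, 1, 0.0, 1.0, positive);

        // Outputs constrained to [-1,+1], one non-linear hidden layer.
        alglib::multilayerperceptron bounded;
        alglib::mlpcreater1(3, 10, 1, -1.0, +1.0, bounded);

        std::printf("weights: %d and %d\n",
                    (int)alglib::mlpgetweightscount(positive),
                    (int)alglib::mlpgetweightscount(bounded));
        return 0;
    }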
-- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 30.03.2008 by Bochkanov Sergey *************************************************************************/ -void clusterizersetahcalgo(const clusterizerstate &s, const ae_int_t algo) +void mlpcreater0(const ae_int_t nin, const ae_int_t nout, const double a, const double b, multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizersetahcalgo(const_cast(s.c_ptr()), algo, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreater0(nin, nout, a, b, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function sets k-means properties: number of restarts and maximum -number of iterations per one run. - -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - Restarts- restarts count, >=1. - k-means++ algorithm performs several restarts and chooses - best set of centers (one with minimum squared distance). - MaxIts - maximum number of k-means iterations performed during one - run. >=0, zero value means that algorithm performs unlimited - number of iterations. +Same as MLPCreateR0, but with non-linear hidden layer. -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 30.03.2008 by Bochkanov Sergey *************************************************************************/ -void clusterizersetkmeanslimits(const clusterizerstate &s, const ae_int_t restarts, const ae_int_t maxits) +void mlpcreater1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double a, const double b, multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizersetkmeanslimits(const_cast(s.c_ptr()), restarts, maxits, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreater1(nin, nhid, nout, a, b, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function sets k-means initialization algorithm. Several different -algorithms can be chosen, including k-means++. 
- -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - InitAlgo- initialization algorithm: - * 0 automatic selection ( different versions of ALGLIB - may select different algorithms) - * 1 random initialization - * 2 k-means++ initialization (best quality of initial - centers, but long non-parallelizable initialization - phase with bad cache locality) - * 3 "fast-greedy" algorithm with efficient, easy to - parallelize initialization. Quality of initial centers - is somewhat worse than that of k-means++. This - algorithm is a default one in the current version of - ALGLIB. - *-1 "debug" algorithm which always selects first K rows - of dataset; this algorithm is used for debug purposes - only. Do not use it in the industrial code! +Same as MLPCreateR0, but with two non-linear hidden layers. -- ALGLIB -- - Copyright 21.01.2015 by Bochkanov Sergey + Copyright 30.03.2008 by Bochkanov Sergey *************************************************************************/ -void clusterizersetkmeansinit(const clusterizerstate &s, const ae_int_t initalgo) +void mlpcreater2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double a, const double b, multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizersetkmeansinit(const_cast(s.c_ptr()), initalgo, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreater2(nin, nhid1, nhid2, nout, a, b, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function performs agglomerative hierarchical clustering - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Agglomerative hierarchical clustering algorithm has two phases: - ! distance matrix calculation and clustering itself. Only first phase - ! (distance matrix calculation) is accelerated by Intel MKL and multi- - ! threading. Thus, acceleration is significant only for medium or high- - ! dimensional problems. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. - -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - -OUTPUT PARAMETERS: - Rep - clustering results; see description of AHCReport - structure for more information. - -NOTE 1: hierarchical clustering algorithms require large amounts of memory. - In particular, this implementation needs sizeof(double)*NPoints^2 - bytes, which are used to store distance matrix. 
In case we work - with user-supplied matrix, this amount is multiplied by 2 (we have - to store original matrix and to work with its copy). - - For example, problem with 10000 points would require 800M of RAM, - even when working in a 1-dimensional space. +Creates classifier network with NIn inputs and NOut possible classes. +Network contains no hidden layers and linear output layer with SOFTMAX- +normalization (so outputs sums up to 1.0 and converge to posterior +probabilities). -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -void clusterizerrunahc(const clusterizerstate &s, ahcreport &rep) +void mlpcreatec0(const ae_int_t nin, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizerrunahc(const_cast(s.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreatec0(nin, nout, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +Same as MLPCreateC0, but with one non-linear hidden layer. -void smp_clusterizerrunahc(const clusterizerstate &s, ahcreport &rep) + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +void mlpcreatec1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::_pexec_clusterizerrunahc(const_cast(s.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreatec1(nin, nhid, nout, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function performs clustering by k-means++ algorithm. - -You may change algorithm properties by calling: -* ClusterizerSetKMeansLimits() to change number of restarts or iterations -* ClusterizerSetKMeansInit() to change initialization algorithm - -By default, one restart and unlimited number of iterations are used. -Initialization algorithm is chosen automatically. - -COMMERCIAL EDITION OF ALGLIB: - - ! 
Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (can be used from C# and C++) - ! * access to high-performance C++ core (actual for C# users) - ! - ! K-means clustering algorithm has two phases: selection of initial - ! centers and clustering itself. ALGLIB parallelizes both phases. - ! Parallel version is optimized for the following scenario: medium or - ! high-dimensional problem (20 or more dimensions) with large number of - ! points and clusters. However, some speed-up can be obtained even when - ! assumptions above are violated. - ! - ! As for native-vs-managed comparison, working with native core brings - ! 30-40% improvement in speed over pure C# version of ALGLIB. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. - -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - K - number of clusters, K>=0. - K can be zero only when algorithm is called for empty - dataset, in this case completion code is set to - success (+1). - If K=0 and dataset size is non-zero, we can not - meaningfully assign points to some center (there are no - centers because K=0) and return -3 as completion code - (failure). - -OUTPUT PARAMETERS: - Rep - clustering results; see description of KMeansReport - structure for more information. - -NOTE 1: k-means clustering can be performed only for datasets with - Euclidean distance function. Algorithm will return negative - completion code in Rep.TerminationType in case dataset was added - to clusterizer with DistType other than Euclidean (or dataset was - specified by distance matrix instead of explicitly given points). +Same as MLPCreateC0, but with two non-linear hidden layers. 
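A sketch of the classifier constructors (mlpcreatec0/c1/c2) whose outputs are SOFTMAX-normalized. It additionally uses mlpprocess from the same mlpbase unit, which is assumed to be available alongside these wrappers; header name and geometry are assumptions.

    #include <cstdio>
    #include "dataanalysis.h"   // assumed header name

    int main()
    {
        // Classifier: 4 inputs, 3 classes, one non-linear hidden layer.
        alglib::multilayerperceptron clf;
        alglib::mlpcreatec1(4, 8, 3, clf);
        std::printf("softmax classifier: %s\n",
                    alglib::mlpissoftmax(clf) ? "yes" : "no");

        // Even before training, the outputs form a probability distribution.
        alglib::real_1d_array x = "[0.1, 0.2, 0.3, 0.4]";
        alglib::real_1d_array y;
        alglib::mlpprocess(clf, x, y);
        double sum = 0;
        for(alglib::ae_int_t i = 0; i < y.length(); i++)
            sum += y[i];
        std::printf("sum of class probabilities: %.6f\n", sum);  // ~1.0
        return 0;
    }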
-- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -void clusterizerrunkmeans(const clusterizerstate &s, const ae_int_t k, kmeansreport &rep) +void mlpcreatec2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizerrunkmeans(const_cast(s.c_ptr()), k, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreatec2(nin, nhid1, nhid2, nout, const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +Copying of neural network + +INPUT PARAMETERS: + Network1 - original + +OUTPUT PARAMETERS: + Network2 - copy -void smp_clusterizerrunkmeans(const clusterizerstate &s, const ae_int_t k, kmeansreport &rep) + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +void mlpcopy(const multilayerperceptron &network1, multilayerperceptron &network2, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::_pexec_clusterizerrunkmeans(const_cast(s.c_ptr()), k, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcopy(const_cast(network1.c_ptr()), const_cast(network2.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function returns distance matrix for dataset - -COMMERCIAL EDITION OF ALGLIB: +This function copies tunable parameters (weights/means/sigmas) from one +network to another with same architecture. It performs some rudimentary +checks that architectures are same, and throws exception if check fails. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Agglomerative hierarchical clustering algorithm has two phases: - ! distance matrix calculation and clustering itself. Only first phase - ! (distance matrix calculation) is accelerated by Intel MKL and multi- - ! threading. 
Thus, acceleration is significant only for medium or high- - ! dimensional problems. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +It is intended for fast copying of states between two network which are +known to have same geometry. INPUT PARAMETERS: - XY - array[NPoints,NFeatures], dataset - NPoints - number of points, >=0 - NFeatures- number of features, >=1 - DistType- distance function: - * 0 Chebyshev distance (L-inf norm) - * 1 city block distance (L1 norm) - * 2 Euclidean distance (L2 norm, non-squared) - * 10 Pearson correlation: - dist(a,b) = 1-corr(a,b) - * 11 Absolute Pearson correlation: - dist(a,b) = 1-|corr(a,b)| - * 12 Uncentered Pearson correlation (cosine of the angle): - dist(a,b) = a'*b/(|a|*|b|) - * 13 Absolute uncentered Pearson correlation - dist(a,b) = |a'*b|/(|a|*|b|) - * 20 Spearman rank correlation: - dist(a,b) = 1-rankcorr(a,b) - * 21 Absolute Spearman rank correlation - dist(a,b) = 1-|rankcorr(a,b)| + Network1 - source, must be correctly initialized + Network2 - target, must have same architecture OUTPUT PARAMETERS: - D - array[NPoints,NPoints], distance matrix - (full matrix is returned, with lower and upper triangles) - -NOTE: different distance functions have different performance penalty: - * Euclidean or Pearson correlation distances are the fastest ones - * Spearman correlation distance function is a bit slower - * city block and Chebyshev distances are order of magnitude slower - - The reason behing difference in performance is that correlation-based - distance functions are computed using optimized linear algebra kernels, - while Chebyshev and city block distance functions are computed using - simple nested loops with two branches at each iteration. 
+ Network2 - network state is copied from source to target -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 20.06.2013 by Bochkanov Sergey *************************************************************************/ -void clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d) +void mlpcopytunableparameters(const multilayerperceptron &network1, const multilayerperceptron &network2, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::clusterizergetdistances(const_cast(xy.c_ptr()), npoints, nfeatures, disttype, const_cast(d.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcopytunableparameters(const_cast(network1.c_ptr()), const_cast(network2.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +Randomization of neural network weights -void smp_clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d) + -- ALGLIB -- + Copyright 06.11.2007 by Bochkanov Sergey +*************************************************************************/ +void mlprandomize(const multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::_pexec_clusterizergetdistances(const_cast(xy.c_ptr()), npoints, nfeatures, disttype, const_cast(d.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlprandomize(const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function takes as input clusterization report Rep, desired clusters -count K, and builds top K clusters from hierarchical clusterization tree. -It returns assignment of points to clusters (array of cluster indexes). - -INPUT PARAMETERS: - Rep - report from ClusterizerRunAHC() performed on XY - K - desired number of clusters, 1<=K<=NPoints. - K can be zero only when NPoints=0. - -OUTPUT PARAMETERS: - CIdx - array[NPoints], I-th element contains cluster index (from - 0 to K-1) for I-th point of the dataset. - CZ - array[K]. This array allows to convert cluster indexes - returned by this function to indexes used by Rep.Z. 
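A sketch of the copying helpers in these hunks: mlpcopy makes a full structural copy, while mlpcopytunableparameters transfers only weights/means/sigmas between networks of identical geometry; mlprandomize reinitializes the weights. Illustrative only; header name and geometries are assumptions.

    #include <cstdio>
    #include "dataanalysis.h"   // assumed header name

    int main()
    {
        // Source network with randomized weights.
        alglib::multilayerperceptron a;
        alglib::mlpcreate1(2, 6, 1, a);
        alglib::mlprandomize(a);

        // Full structural copy.
        alglib::multilayerperceptron b;
        alglib::mlpcopy(a, b);

        // Fast parameter-only copy between networks of the same geometry.
        alglib::multilayerperceptron c;
        alglib::mlpcreate1(2, 6, 1, c);
        alglib::mlpcopytunableparameters(a, c);

        std::printf("weights per network: %d\n", (int)alglib::mlpgetweightscount(a));
        return 0;
    }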
J-th - cluster returned by this function corresponds to CZ[J]-th - cluster stored in Rep.Z/PZ/PM. - It is guaranteed that CZ[I](rep.c_ptr()), k, const_cast(cidx.c_ptr()), const_cast(cz.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlprandomizefull(const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function accepts AHC report Rep, desired minimum intercluster -distance and returns top clusters from hierarchical clusterization tree -which are separated by distance R or HIGHER. - -It returns assignment of points to clusters (array of cluster indexes). - -There is one more function with similar name - ClusterizerSeparatedByCorr, -which returns clusters with intercluster correlation equal to R or LOWER -(note: higher for distance, lower for correlation). - -INPUT PARAMETERS: - Rep - report from ClusterizerRunAHC() performed on XY - R - desired minimum intercluster distance, R>=0 - -OUTPUT PARAMETERS: - K - number of clusters, 1<=K<=NPoints - CIdx - array[NPoints], I-th element contains cluster index (from - 0 to K-1) for I-th point of the dataset. - CZ - array[K]. This array allows to convert cluster indexes - returned by this function to indexes used by Rep.Z. J-th - cluster returned by this function corresponds to CZ[J]-th - cluster stored in Rep.Z/PZ/PM. - It is guaranteed that CZ[I](rep.c_ptr()), r, &k, const_cast(cidx.c_ptr()), const_cast(cz.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpinitpreprocessor(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function accepts AHC report Rep, desired maximum intercluster -correlation and returns top clusters from hierarchical clusterization tree -which are separated by correlation R or LOWER. - -It returns assignment of points to clusters (array of cluster indexes). - -There is one more function with similar name - ClusterizerSeparatedByDist, -which returns clusters with intercluster distance equal to R or HIGHER -(note: higher for distance, lower for correlation). - -INPUT PARAMETERS: - Rep - report from ClusterizerRunAHC() performed on XY - R - desired maximum intercluster correlation, -1<=R<=+1 - -OUTPUT PARAMETERS: - K - number of clusters, 1<=K<=NPoints - CIdx - array[NPoints], I-th element contains cluster index (from - 0 to K-1) for I-th point of the dataset. - CZ - array[K]. 
This array allows to convert cluster indexes - returned by this function to indexes used by Rep.Z. J-th - cluster returned by this function corresponds to CZ[J]-th - cluster stored in Rep.Z/PZ/PM. - It is guaranteed that CZ[I](rep.c_ptr()), r, &k, const_cast(cidx.c_ptr()), const_cast(cz.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpproperties(const_cast(network.c_ptr()), &nin, &nout, &wcount, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -k-means++ clusterization. -Backward compatibility function, we recommend to use CLUSTERING subpackage -as better replacement. +Returns number of inputs. -- ALGLIB -- - Copyright 21.03.2009 by Bochkanov Sergey + Copyright 19.10.2011 by Bochkanov Sergey *************************************************************************/ -void kmeansgenerate(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t k, const ae_int_t restarts, ae_int_t &info, real_2d_array &c, integer_1d_array &xyc) +ae_int_t mlpgetinputscount(const multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::kmeansgenerate(const_cast(xy.c_ptr()), npoints, nvars, k, restarts, &info, const_cast(c.c_ptr()), const_cast(xyc.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::mlpgetinputscount(const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* +Returns number of outputs. 
+ -- ALGLIB -- + Copyright 19.10.2011 by Bochkanov Sergey *************************************************************************/ -_decisionforest_owner::_decisionforest_owner() -{ - p_struct = (alglib_impl::decisionforest*)alglib_impl::ae_malloc(sizeof(alglib_impl::decisionforest), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_decisionforest_init(p_struct, NULL); -} - -_decisionforest_owner::_decisionforest_owner(const _decisionforest_owner &rhs) +ae_int_t mlpgetoutputscount(const multilayerperceptron &network, const xparams _xparams) { - p_struct = (alglib_impl::decisionforest*)alglib_impl::ae_malloc(sizeof(alglib_impl::decisionforest), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_decisionforest_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::mlpgetoutputscount(const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } -_decisionforest_owner& _decisionforest_owner::operator=(const _decisionforest_owner &rhs) -{ - if( this==&rhs ) - return *this; - alglib_impl::_decisionforest_clear(p_struct); - alglib_impl::_decisionforest_init_copy(p_struct, const_cast(rhs.p_struct), NULL); - return *this; -} +/************************************************************************* +Returns number of weights. -_decisionforest_owner::~_decisionforest_owner() + -- ALGLIB -- + Copyright 19.10.2011 by Bochkanov Sergey +*************************************************************************/ +ae_int_t mlpgetweightscount(const multilayerperceptron &network, const xparams _xparams) { - alglib_impl::_decisionforest_clear(p_struct); - ae_free(p_struct); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::mlpgetweightscount(const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } -alglib_impl::decisionforest* _decisionforest_owner::c_ptr() -{ - return p_struct; -} +/************************************************************************* +Tells whether network is SOFTMAX-normalized (i.e. classifier) or not. 
-alglib_impl::decisionforest* _decisionforest_owner::c_ptr() const -{ - return const_cast(p_struct); -} -decisionforest::decisionforest() : _decisionforest_owner() + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +bool mlpissoftmax(const multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + ae_bool result = alglib_impl::mlpissoftmax(const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } -decisionforest::decisionforest(const decisionforest &rhs):_decisionforest_owner(rhs) -{ -} +/************************************************************************* +This function returns total number of layers (including input, hidden and +output layers). -decisionforest& decisionforest::operator=(const decisionforest &rhs) + -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey +*************************************************************************/ +ae_int_t mlpgetlayerscount(const multilayerperceptron &network, const xparams _xparams) { - if( this==&rhs ) - return *this; - _decisionforest_owner::operator=(rhs); - return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::mlpgetlayerscount(const_cast(network.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } -decisionforest::~decisionforest() -{ -} +/************************************************************************* +This function returns size of K-th layer. +K=0 corresponds to input layer, K=CNT-1 corresponds to output layer. -/************************************************************************* +Size of the output layer is always equal to the number of outputs, although +when we have softmax-normalized network, last neuron doesn't have any +connections - it is just zero. 
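A sketch of the geometry-introspection wrappers added here (mlpgetlayerscount, mlpgetlayersize, mlpgetinputscount, mlpgetoutputscount, mlpgetweightscount); layer 0 is the input layer and layer CNT-1 the output layer. Illustrative only; header name and geometry are assumptions.

    #include <cstdio>
    #include "dataanalysis.h"   // assumed header name

    int main()
    {
        // 4 inputs, two hidden layers (10 and 6 neurons), 2 outputs.
        alglib::multilayerperceptron net;
        alglib::mlpcreate2(4, 10, 6, 2, net);

        // Walk the layers, input layer first, output layer last.
        alglib::ae_int_t cnt = alglib::mlpgetlayerscount(net);
        for(alglib::ae_int_t k = 0; k < cnt; k++)
            std::printf("layer %d: %d neurons\n",
                        (int)k, (int)alglib::mlpgetlayersize(net, k));

        std::printf("inputs=%d outputs=%d weights=%d\n",
                    (int)alglib::mlpgetinputscount(net),
                    (int)alglib::mlpgetoutputscount(net),
                    (int)alglib::mlpgetweightscount(net));
        return 0;
    }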
+ -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -_dfreport_owner::_dfreport_owner() -{ - p_struct = (alglib_impl::dfreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::dfreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_dfreport_init(p_struct, NULL); -} - -_dfreport_owner::_dfreport_owner(const _dfreport_owner &rhs) +ae_int_t mlpgetlayersize(const multilayerperceptron &network, const ae_int_t k, const xparams _xparams) { - p_struct = (alglib_impl::dfreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::dfreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_dfreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::mlpgetlayersize(const_cast(network.c_ptr()), k, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } -_dfreport_owner& _dfreport_owner::operator=(const _dfreport_owner &rhs) -{ - if( this==&rhs ) - return *this; - alglib_impl::_dfreport_clear(p_struct); - alglib_impl::_dfreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); - return *this; -} +/************************************************************************* +This function returns offset/scaling coefficients for I-th input of the +network. -_dfreport_owner::~_dfreport_owner() -{ - alglib_impl::_dfreport_clear(p_struct); - ae_free(p_struct); -} +INPUT PARAMETERS: + Network - network + I - input index -alglib_impl::dfreport* _dfreport_owner::c_ptr() -{ - return p_struct; -} +OUTPUT PARAMETERS: + Mean - mean term + Sigma - sigma term, guaranteed to be nonzero. 
-alglib_impl::dfreport* _dfreport_owner::c_ptr() const -{ - return const_cast(p_struct); -} -dfreport::dfreport() : _dfreport_owner() ,relclserror(p_struct->relclserror),avgce(p_struct->avgce),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror),oobrelclserror(p_struct->oobrelclserror),oobavgce(p_struct->oobavgce),oobrmserror(p_struct->oobrmserror),oobavgerror(p_struct->oobavgerror),oobavgrelerror(p_struct->oobavgrelerror) -{ -} +I-th input is passed through linear transformation + IN[i] = (IN[i]-Mean)/Sigma +before feeding to the network -dfreport::dfreport(const dfreport &rhs):_dfreport_owner(rhs) ,relclserror(p_struct->relclserror),avgce(p_struct->avgce),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror),oobrelclserror(p_struct->oobrelclserror),oobavgce(p_struct->oobavgce),oobrmserror(p_struct->oobrmserror),oobavgerror(p_struct->oobavgerror),oobavgrelerror(p_struct->oobavgrelerror) + -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey +*************************************************************************/ +void mlpgetinputscaling(const multilayerperceptron &network, const ae_int_t i, double &mean, double &sigma, const xparams _xparams) { + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpgetinputscaling(const_cast(network.c_ptr()), i, &mean, &sigma, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } -dfreport& dfreport::operator=(const dfreport &rhs) -{ - if( this==&rhs ) - return *this; - _dfreport_owner::operator=(rhs); - return *this; -} +/************************************************************************* +This function returns offset/scaling coefficients for I-th output of the +network. -dfreport::~dfreport() -{ -} +INPUT PARAMETERS: + Network - network + I - input index +OUTPUT PARAMETERS: + Mean - mean term + Sigma - sigma term, guaranteed to be nonzero. -/************************************************************************* -This function serializes data structure to string. +I-th output is passed through linear transformation + OUT[i] = OUT[i]*Sigma+Mean +before returning it to user. In case we have SOFTMAX-normalized network, +we return (Mean,Sigma)=(0.0,1.0). -Important properties of s_out: -* it contains alphanumeric characters, dots, underscores, minus signs -* these symbols are grouped into words, which are separated by spaces - and Windows-style (CR+LF) newlines -* although serializer uses spaces and CR+LF as separators, you can - replace any separator character by arbitrary combination of spaces, - tabs, Windows or Unix newlines. It allows flexible reformatting of - the string in case you want to include it into text or XML file. - But you should not insert separators into the middle of the "words" - nor you should change case of letters. -* s_out can be freely moved between 32-bit and 64-bit systems, little - and big endian machines, and so on. You can serialize structure on - 32-bit machine and unserialize it on 64-bit one (or vice versa), or - serialize it on SPARC and unserialize on x86. 
You can also - serialize it in C++ version of ALGLIB and unserialize in C# one, - and vice versa. + -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -void dfserialize(decisionforest &obj, std::string &s_out) +void mlpgetoutputscaling(const multilayerperceptron &network, const ae_int_t i, double &mean, double &sigma, const xparams _xparams) { - alglib_impl::ae_state state; - alglib_impl::ae_serializer serializer; - alglib_impl::ae_int_t ssize; - - alglib_impl::ae_state_init(&state); - try - { - alglib_impl::ae_serializer_init(&serializer); - alglib_impl::ae_serializer_alloc_start(&serializer); - alglib_impl::dfalloc(&serializer, obj.c_ptr(), &state); - ssize = alglib_impl::ae_serializer_get_alloc_size(&serializer); - s_out.clear(); - s_out.reserve((size_t)(ssize+1)); - alglib_impl::ae_serializer_sstart_str(&serializer, &s_out); - alglib_impl::dfserialize(&serializer, obj.c_ptr(), &state); - alglib_impl::ae_serializer_stop(&serializer); - if( s_out.length()>(size_t)ssize ) - throw ap_error("ALGLIB: serialization integrity error"); - alglib_impl::ae_serializer_clear(&serializer); - alglib_impl::ae_state_clear(&state); - } - catch(alglib_impl::ae_error_type) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - throw ap_error(state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpgetoutputscaling(const_cast(network.c_ptr()), i, &mean, &sigma, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } + /************************************************************************* -This function unserializes data structure from string. +This function returns information about Ith neuron of Kth layer + +INPUT PARAMETERS: + Network - network + K - layer index + I - neuron index (within layer) + +OUTPUT PARAMETERS: + FKind - activation function type (used by MLPActivationFunction()) + this value is zero for input or linear neurons + Threshold - also called offset, bias + zero for input neurons + +NOTE: this function throws exception if layer or neuron with given index +do not exists. 
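A sketch of the per-neuron inspection helpers described here and in the following hunks: mlpgetneuroninfo reports the activation kind and bias of a neuron, and mlpgetweight returns an individual connection weight (zero for connections that do not exist). Illustrative only; header name and geometry are assumptions.

    #include <cstdio>
    #include "dataanalysis.h"   // assumed header name

    int main()
    {
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(2, 3, 1, net);

        // Activation kind and bias of the first neuron of the hidden layer
        // (layer 1; layer 0 is the input layer).
        alglib::ae_int_t fkind;
        double threshold;
        alglib::mlpgetneuroninfo(net, 1, 0, fkind, threshold);
        std::printf("hidden neuron 0: fkind=%d threshold=%g\n",
                    (int)fkind, threshold);

        // Weight of the connection from input 0 (layer 0) to hidden neuron 0
        // (layer 1); zero would indicate a non-existent connection.
        double w = alglib::mlpgetweight(net, 0, 0, 1, 0);
        std::printf("w(0,0 -> 1,0) = %g\n", w);
        return 0;
    }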
+ + -- ALGLIB -- + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -void dfunserialize(std::string &s_in, decisionforest &obj) +void mlpgetneuroninfo(const multilayerperceptron &network, const ae_int_t k, const ae_int_t i, ae_int_t &fkind, double &threshold, const xparams _xparams) { - alglib_impl::ae_state state; - alglib_impl::ae_serializer serializer; - - alglib_impl::ae_state_init(&state); - try - { - alglib_impl::ae_serializer_init(&serializer); - alglib_impl::ae_serializer_ustart_str(&serializer, &s_in); - alglib_impl::dfunserialize(&serializer, obj.c_ptr(), &state); - alglib_impl::ae_serializer_stop(&serializer); - alglib_impl::ae_serializer_clear(&serializer); - alglib_impl::ae_state_clear(&state); - } - catch(alglib_impl::ae_error_type) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - throw ap_error(state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpgetneuroninfo(const_cast(network.c_ptr()), k, i, &fkind, &threshold, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This subroutine builds random decision forest. +This function returns information about connection from I0-th neuron of +K0-th layer to I1-th neuron of K1-th layer. INPUT PARAMETERS: - XY - training set - NPoints - training set size, NPoints>=1 - NVars - number of independent variables, NVars>=1 - NClasses - task type: - * NClasses=1 - regression task with one - dependent variable - * NClasses>1 - classification task with - NClasses classes. - NTrees - number of trees in a forest, NTrees>=1. - recommended values: 50-100. - R - percent of a training set used to build - individual trees. 01). - * 1, if task has been solved - DF - model built - Rep - training report, contains error on a training set - and out-of-bag estimates of generalization error. +RESULT: + connection weight (zero for non-existent connections) + +This function: +1. throws exception if layer or neuron with given index do not exists. +2. 
returns zero if neurons exist, but there is no connection between them -- ALGLIB -- - Copyright 19.02.2009 by Bochkanov Sergey + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -void dfbuildrandomdecisionforest(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees, const double r, ae_int_t &info, decisionforest &df, dfreport &rep) +double mlpgetweight(const multilayerperceptron &network, const ae_int_t k0, const ae_int_t i0, const ae_int_t k1, const ae_int_t i1, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::dfbuildrandomdecisionforest(const_cast(xy.c_ptr()), npoints, nvars, nclasses, ntrees, r, &info, const_cast(df.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpgetweight(const_cast(network.c_ptr()), k0, i0, k1, i1, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -This subroutine builds random decision forest. -This function gives ability to tune number of variables used when choosing -best split. +This function sets offset/scaling coefficients for I-th input of the +network. INPUT PARAMETERS: - XY - training set - NPoints - training set size, NPoints>=1 - NVars - number of independent variables, NVars>=1 - NClasses - task type: - * NClasses=1 - regression task with one - dependent variable - * NClasses>1 - classification task with - NClasses classes. - NTrees - number of trees in a forest, NTrees>=1. - recommended values: 50-100. - NRndVars - number of variables used when choosing best split - R - percent of a training set used to build - individual trees. 01). - * 1, if task has been solved - DF - model built - Rep - training report, contains error on a training set - and out-of-bag estimates of generalization error. +NTE: I-th input is passed through linear transformation + IN[i] = (IN[i]-Mean)/Sigma +before feeding to the network. This function sets Mean and Sigma. 
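A short usage sketch for the input-scaling getter/setter pair described above (illustrative only; assumes the dataanalysis.h header and the default xparams argument):

    #include <cstdio>
    #include "dataanalysis.h"

    int main()
    {
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(2, 5, 1, net);

        // request IN[0] = (IN[0]-10.0)/2.5 before it reaches the first layer
        alglib::mlpsetinputscaling(net, 0, 10.0, 2.5);

        // read the coefficients back
        double mean, sigma;
        alglib::mlpgetinputscaling(net, 0, mean, sigma);
        std::printf("input 0: mean=%g sigma=%g\n", mean, sigma);
        return 0;
    }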
-- ALGLIB -- - Copyright 19.02.2009 by Bochkanov Sergey + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -void dfbuildrandomdecisionforestx1(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees, const ae_int_t nrndvars, const double r, ae_int_t &info, decisionforest &df, dfreport &rep) +void mlpsetinputscaling(const multilayerperceptron &network, const ae_int_t i, const double mean, const double sigma, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::dfbuildrandomdecisionforestx1(const_cast(xy.c_ptr()), npoints, nvars, nclasses, ntrees, nrndvars, r, &info, const_cast(df.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpsetinputscaling(const_cast(network.c_ptr()), i, mean, sigma, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Procesing +This function sets offset/scaling coefficients for I-th output of the +network. INPUT PARAMETERS: - DF - decision forest model - X - input vector, array[0..NVars-1]. + Network - network + I - input index + Mean - mean term + Sigma - sigma term (if zero, will be replaced by 1.0) OUTPUT PARAMETERS: - Y - result. Regression estimate when solving regression task, - vector of posterior probabilities for classification task. -See also DFProcessI. +NOTE: I-th output is passed through linear transformation + OUT[i] = OUT[i]*Sigma+Mean +before returning it to user. This function sets Sigma/Mean. In case we +have SOFTMAX-normalized network, you can not set (Sigma,Mean) to anything +other than(0.0,1.0) - this function will throw exception. 
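A hedged example of adjusting output scaling on a regression network (illustrative; assumes dataanalysis.h and the default xparams argument; for SOFTMAX-normalized classifiers only (0.0,1.0) is accepted, as noted above):

    #include <cstdio>
    #include "dataanalysis.h"

    int main()
    {
        // regression network: outputs are linear, so scaling may be changed
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(3, 4, 1, net);

        // OUT[0] = OUT[0]*100.0 + 50.0 on the way out of the network
        alglib::mlpsetoutputscaling(net, 0, 50.0, 100.0);

        double mean, sigma;
        alglib::mlpgetoutputscaling(net, 0, mean, sigma);
        std::printf("output 0: mean=%g sigma=%g\n", mean, sigma);

        // for a classifier (SOFTMAX-normalized outputs) anything other
        // than (0.0,1.0) would make mlpsetoutputscaling throw
        return 0;
    }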
-- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -void dfprocess(const decisionforest &df, const real_1d_array &x, real_1d_array &y) +void mlpsetoutputscaling(const multilayerperceptron &network, const ae_int_t i, const double mean, const double sigma, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::dfprocess(const_cast(df.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpsetoutputscaling(const_cast(network.c_ptr()), i, mean, sigma, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -'interactive' variant of DFProcess for languages like Python which support -constructs like "Y = DFProcessI(DF,X)" and interactive mode of interpreter +This function modifies information about Ith neuron of Kth layer -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +INPUT PARAMETERS: + Network - network + K - layer index + I - neuron index (within layer) + FKind - activation function type (used by MLPActivationFunction()) + this value must be zero for input neurons + (you can not set activation function for input neurons) + Threshold - also called offset, bias + this value must be zero for input neurons + (you can not set threshold for input neurons) + +NOTES: +1. this function throws exception if layer or neuron with given index do + not exists. +2. this function also throws exception when you try to set non-linear + activation function for input neurons (any kind of network) or for output + neurons of classifier network. +3. this function throws exception when you try to set non-zero threshold for + input neurons (any kind of network). 
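An illustrative sketch that re-biases one hidden neuron while keeping its activation kind, using the getter/setter pair documented above (assumes dataanalysis.h, the default xparams argument, and that layer 0 is the input layer, so layer 1 is taken here as the first hidden layer):

    #include <cstdio>
    #include "dataanalysis.h"

    int main()
    {
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(2, 3, 1, net);

        // read current settings of neuron 0 in layer 1 (first hidden layer)
        alglib::ae_int_t fkind;
        double threshold;
        alglib::mlpgetneuroninfo(net, 1, 0, fkind, threshold);

        // keep the activation kind, shift the bias; input-layer neurons
        // (layer 0) would reject any non-zero threshold
        alglib::mlpsetneuroninfo(net, 1, 0, fkind, threshold + 0.25);
        return 0;
    }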
-- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -void dfprocessi(const decisionforest &df, const real_1d_array &x, real_1d_array &y) +void mlpsetneuroninfo(const multilayerperceptron &network, const ae_int_t k, const ae_int_t i, const ae_int_t fkind, const double threshold, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::dfprocessi(const_cast(df.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpsetneuroninfo(const_cast(network.c_ptr()), k, i, fkind, threshold, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Relative classification error on the test set +This function modifies information about connection from I0-th neuron of +K0-th layer to I1-th neuron of K1-th layer. INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size + Network - network + K0 - layer index + I0 - neuron index (within layer) + K1 - layer index + I1 - neuron index (within layer) + W - connection weight (must be zero for non-existent + connections) -RESULT: - percent of incorrectly classified cases. - Zero if model solves regression task. +This function: +1. throws exception if layer or neuron with given index do not exists. +2. 
throws exception if you try to set non-zero weight for non-existent + connection -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 25.03.2011 by Bochkanov Sergey *************************************************************************/ -double dfrelclserror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints) +void mlpsetweight(const multilayerperceptron &network, const ae_int_t k0, const ae_int_t i0, const ae_int_t k1, const ae_int_t i1, const double w, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::dfrelclserror(const_cast(df.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpsetweight(const_cast(network.c_ptr()), k0, i0, k1, i1, w, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Average cross-entropy (in bits per element) on the test set +Neural network activation function INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size + NET - neuron input + K - function index (zero for linear function) -RESULT: - CrossEntropy/(NPoints*LN(2)). - Zero if model solves regression task. +OUTPUT PARAMETERS: + F - function + DF - its derivative + D2F - its second derivative -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -double dfavgce(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints) +void mlpactivationfunction(const double net, const ae_int_t k, double &f, double &df, double &d2f, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::dfavgce(const_cast(df.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpactivationfunction(net, k, &f, &df, &d2f, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -RMS error on the test set +Procesing INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size + Network - neural network + X - input vector, array[0..NIn-1]. -RESULT: - root mean square error. 
- Its meaning for regression task is obvious. As for - classification task, RMS error means error when estimating posterior - probabilities. +OUTPUT PARAMETERS: + Y - result. Regression estimate when solving regression task, + vector of posterior probabilities for classification task. + +See also MLPProcessI -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -double dfrmserror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints) +void mlpprocess(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::dfrmserror(const_cast(df.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpprocess(const_cast(network.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Average error on the test set - -INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size +'interactive' variant of MLPProcess for languages like Python which +support constructs like "Y = MLPProcess(NN,X)" and interactive mode of the +interpreter -RESULT: - Its meaning for regression task is obvious. As for - classification task, it means average error when estimating posterior - probabilities. +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. 
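A minimal sketch contrasting the two processing calls described above (illustrative; assumes dataanalysis.h, the default xparams argument, and an untrained network, so the printed values are arbitrary):

    #include <cstdio>
    #include "dataanalysis.h"

    int main()
    {
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(2, 5, 1, net);

        alglib::real_1d_array x = "[0.5, -1.0]";

        // non-interactive form: caller supplies the output array
        alglib::real_1d_array y;
        alglib::mlpprocess(net, x, y);
        std::printf("mlpprocess:  y[0]=%g\n", y[0]);

        // 'interactive' form: allocates a fresh result array on every call
        alglib::real_1d_array y2;
        alglib::mlpprocessi(net, x, y2);
        std::printf("mlpprocessi: y[0]=%g\n", y2[0]);
        return 0;
    }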
-- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 21.09.2010 by Bochkanov Sergey *************************************************************************/ -double dfavgerror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints) +void mlpprocessi(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::dfavgerror(const_cast(df.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpprocessi(const_cast(network.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Average relative error on the test set +Error of the neural network on dataset. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. RESULT: - Its meaning for regression task is obvious. As for - classification task, it means average relative error when estimating - posterior probability of belonging to the correct class. + sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). 
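A small end-to-end sketch of the regression dataset layout and mlperror (illustrative; assumes dataanalysis.h and the default xparams argument):

    #include <cstdio>
    #include "dataanalysis.h"

    int main()
    {
        // regression network: 2 inputs, 1 output -> dataset rows are [in0 in1 out]
        alglib::multilayerperceptron net;
        alglib::mlpcreate1(2, 5, 1, net);

        alglib::real_2d_array xy =
            "[[0.0,0.0,0.0],[0.0,1.0,1.0],[1.0,0.0,1.0],[1.0,1.0,0.0]]";

        // sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2), over 4 points
        double e = alglib::mlperror(net, xy, 4);
        std::printf("sum-of-squares error = %g\n", e);
        return 0;
    }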
-- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -double dfavgrelerror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints) +double mlperror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::dfavgrelerror(const_cast(df.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlperror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* +Error of the neural network on dataset given by sparse matrix. -*************************************************************************/ -_linearmodel_owner::_linearmodel_owner() -{ - p_struct = (alglib_impl::linearmodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::linearmodel), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_linearmodel_init(p_struct, NULL); -} - -_linearmodel_owner::_linearmodel_owner(const _linearmodel_owner &rhs) -{ - p_struct = (alglib_impl::linearmodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::linearmodel), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_linearmodel_init_copy(p_struct, const_cast(rhs.p_struct), NULL); -} - -_linearmodel_owner& _linearmodel_owner::operator=(const _linearmodel_owner &rhs) -{ - if( this==&rhs ) - return *this; - alglib_impl::_linearmodel_clear(p_struct); - alglib_impl::_linearmodel_init_copy(p_struct, const_cast(rhs.p_struct), NULL); - return *this; -} - -_linearmodel_owner::~_linearmodel_owner() -{ - alglib_impl::_linearmodel_clear(p_struct); - ae_free(p_struct); -} - -alglib_impl::linearmodel* _linearmodel_owner::c_ptr() -{ - return p_struct; -} + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -alglib_impl::linearmodel* _linearmodel_owner::c_ptr() const -{ - return const_cast(p_struct); -} -linearmodel::linearmodel() : _linearmodel_owner() -{ -} +INPUT PARAMETERS: + Network - neural network + XY - training set, see below for information on the + training set format. 
This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. + NPoints - points count, >=0 -linearmodel::linearmodel(const linearmodel &rhs):_linearmodel_owner(rhs) -{ -} +RESULT: + sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) -linearmodel& linearmodel::operator=(const linearmodel &rhs) -{ - if( this==&rhs ) - return *this; - _linearmodel_owner::operator=(rhs); - return *this; -} +DATASET FORMAT: -linearmodel::~linearmodel() -{ -} +This function uses two different dataset formats - one for regression +networks, another one for classification networks. +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs -/************************************************************************* -LRReport structure contains additional information about linear model: -* C - covariation matrix, array[0..NVars,0..NVars]. - C[i,j] = Cov(A[i],A[j]) -* RMSError - root mean square error on a training set -* AvgError - average error on a training set -* AvgRelError - average relative error on a training set (excluding - observations with zero function value). -* CVRMSError - leave-one-out cross-validation estimate of - generalization error. Calculated using fast algorithm - with O(NVars*NPoints) complexity. -* CVAvgError - cross-validation estimate of average error -* CVAvgRelError - cross-validation estimate of average relative error +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -All other fields of the structure are intended for internal use and should -not be used outside ALGLIB. 
+ -- ALGLIB -- + Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -_lrreport_owner::_lrreport_owner() -{ - p_struct = (alglib_impl::lrreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::lrreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_lrreport_init(p_struct, NULL); -} - -_lrreport_owner::_lrreport_owner(const _lrreport_owner &rhs) -{ - p_struct = (alglib_impl::lrreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::lrreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_lrreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); -} - -_lrreport_owner& _lrreport_owner::operator=(const _lrreport_owner &rhs) -{ - if( this==&rhs ) - return *this; - alglib_impl::_lrreport_clear(p_struct); - alglib_impl::_lrreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); - return *this; -} - -_lrreport_owner::~_lrreport_owner() -{ - alglib_impl::_lrreport_clear(p_struct); - ae_free(p_struct); -} - -alglib_impl::lrreport* _lrreport_owner::c_ptr() -{ - return p_struct; -} - -alglib_impl::lrreport* _lrreport_owner::c_ptr() const -{ - return const_cast(p_struct); -} -lrreport::lrreport() : _lrreport_owner() ,c(&p_struct->c),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror),cvrmserror(p_struct->cvrmserror),cvavgerror(p_struct->cvavgerror),cvavgrelerror(p_struct->cvavgrelerror),ncvdefects(p_struct->ncvdefects),cvdefects(&p_struct->cvdefects) +double mlperrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlperrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } -lrreport::lrreport(const lrreport &rhs):_lrreport_owner(rhs) ,c(&p_struct->c),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror),cvrmserror(p_struct->cvrmserror),cvavgerror(p_struct->cvavgerror),cvavgrelerror(p_struct->cvavgrelerror),ncvdefects(p_struct->ncvdefects),cvdefects(&p_struct->cvdefects) -{ -} +/************************************************************************* +Natural error function for neural network, internal subroutine. -lrreport& lrreport::operator=(const lrreport &rhs) -{ - if( this==&rhs ) - return *this; - _lrreport_owner::operator=(rhs); - return *this; -} +NOTE: this function is single-threaded. Unlike other error function, it +receives no speed-up from being executed in SMP mode. 
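For the sparse variants above, a hedged sketch of assembling a CRS training matrix and evaluating mlperrorsparse (illustrative; assumes dataanalysis.h and linalg.h, the default xparams argument, the sparsecreate/sparseset/sparseconverttocrs helpers from ALGLIB's sparse-matrix interface, and mlpcreatec1 for building a small classifier):

    #include <cstdio>
    #include "linalg.h"
    #include "dataanalysis.h"

    int main()
    {
        // classifier with 2 inputs and 2 classes -> dataset rows are [in0 in1 class]
        alglib::multilayerperceptron net;
        alglib::mlpcreatec1(2, 5, 2, net);

        // 3x3 sparse dataset, converted to CRS as required by the *sparse functions
        alglib::sparsematrix xy;
        alglib::sparsecreate(3, 3, xy);
        alglib::sparseset(xy, 0, 0, 1.0);   // point 0: in=(1,0), class 0 (unset entries are 0)
        alglib::sparseset(xy, 1, 1, 1.0);   // point 1: in=(0,1)
        alglib::sparseset(xy, 1, 2, 1.0);   //          class 1
        alglib::sparseset(xy, 2, 0, 1.0);   // point 2: in=(1,1), class 0
        alglib::sparseset(xy, 2, 1, 1.0);
        alglib::sparseconverttocrs(xy);

        double e = alglib::mlperrorsparse(net, xy, 3);
        std::printf("sum-of-squares error = %g\n", e);
        return 0;
    }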
-lrreport::~lrreport() + -- ALGLIB -- + Copyright 04.11.2007 by Bochkanov Sergey +*************************************************************************/ +double mlperrorn(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, const xparams _xparams) { + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlperrorn(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Linear regression +Classification error of the neural network on dataset. -Subroutine builds model: + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - Y = A(0)*X[0] + ... + A(N-1)*X[N-1] + A(N) +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. -and model found in ALGLIB format, covariation matrix, training set errors -(rms, average, average relative) and leave-one-out cross-validation -estimate of the generalization error. CV estimate calculated using fast -algorithm with O(NPoints*NVars) complexity. +RESULT: + classification error (number of misclassified cases) -When covariation matrix is calculated standard deviations of function -values are assumed to be equal to RMS error on the training set. +DATASET FORMAT: -INPUT PARAMETERS: - XY - training set, array [0..NPoints-1,0..NVars]: - * NVars columns - independent variables - * last column - dependent variable - NPoints - training set size, NPoints>NVars+1 - NVars - number of independent variables +This function uses two different dataset formats - one for regression +networks, another one for classification networks. 
-OUTPUT PARAMETERS: - Info - return code: - * -255, in case of unknown internal error - * -4, if internal SVD subroutine haven't converged - * -1, if incorrect parameters was passed (NPoints(xy.c_ptr()), npoints, nvars, &info, const_cast(lm.c_ptr()), const_cast(ar.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::mlpclserror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Linear regression +Relative classification error on the test set. -Variant of LRBuild which uses vector of standatd deviations (errors in -function values). + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - XY - training set, array [0..NPoints-1,0..NVars]: - * NVars columns - independent variables - * last column - dependent variable - S - standard deviations (errors in function values) - array[0..NPoints-1], S[i]>0. - NPoints - training set size, NPoints>NVars+1 - NVars - number of independent variables + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. -OUTPUT PARAMETERS: - Info - return code: - * -255, in case of unknown internal error - * -4, if internal SVD subroutine haven't converged - * -1, if incorrect parameters was passed (NPoints(xy.c_ptr()), const_cast(s.c_ptr()), npoints, nvars, &info, const_cast(lm.c_ptr()), const_cast(ar.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlprelclserror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Like LRBuildS, but builds model +Relative classification error on the test set given by sparse matrix. - Y = A(0)*X[0] + ... + A(N-1)*X[N-1] + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! 
of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -i.e. with zero constant term. +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format. Sparse matrix must use CRS format + for storage. + NPoints - points count, >=0. - -- ALGLIB -- - Copyright 30.10.2008 by Bochkanov Sergey -*************************************************************************/ -void lrbuildzs(const real_2d_array &xy, const real_1d_array &s, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::lrbuildzs(const_cast(xy.c_ptr()), const_cast(s.c_ptr()), npoints, nvars, &info, const_cast(lm.c_ptr()), const_cast(ar.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} +RESULT: +Percent of incorrectly classified cases. Works both for classifier +networks and general purpose networks used as classifiers. -/************************************************************************* -Like LRBuild but builds model +DATASET FORMAT: - Y = A(0)*X[0] + ... + A(N-1)*X[N-1] +This function uses two different dataset formats - one for regression +networks, another one for classification networks. -i.e. with zero constant term. +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). 
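A brief sketch of the classification dataset layout with mlprelclserror (illustrative; assumes dataanalysis.h, the default xparams argument, and mlpcreatec1 for the classifier; the network is untrained, so the reported error is essentially arbitrary):

    #include <cstdio>
    #include "dataanalysis.h"

    int main()
    {
        // classifier: 2 inputs, 3 classes -> rows are [in0 in1 class], class in 0..2
        alglib::multilayerperceptron net;
        alglib::mlpcreatec1(2, 4, 3, net);

        alglib::real_2d_array xy =
            "[[0.1,0.2,0],[0.9,0.8,1],[0.5,0.5,2],[0.2,0.9,1]]";

        // relative classification error on the 4-point set
        double relcls = alglib::mlprelclserror(net, xy, 4);
        std::printf("relative classification error = %g\n", relcls);
        return 0;
    }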
-- ALGLIB -- - Copyright 30.10.2008 by Bochkanov Sergey + Copyright 09.08.2012 by Bochkanov Sergey *************************************************************************/ -void lrbuildz(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar) +double mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::lrbuildz(const_cast(xy.c_ptr()), npoints, nvars, &info, const_cast(lm.c_ptr()), const_cast(ar.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlprelclserrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Unpacks coefficients of linear model. +Average cross-entropy (in bits per element) on the test set. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - LM - linear model in ALGLIB format + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. -OUTPUT PARAMETERS: - V - coefficients, array[0..NVars] - constant term (intercept) is stored in the V[NVars]. - NVars - number of independent variables (one less than number - of coefficients) +RESULT: +CrossEntropy/(NPoints*LN(2)). +Zero if network solves regression task. - -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey -*************************************************************************/ -void lrunpack(const linearmodel &lm, real_1d_array &v, ae_int_t &nvars) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::lrunpack(const_cast(lm.c_ptr()), const_cast(v.c_ptr()), &nvars, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} +DATASET FORMAT: -/************************************************************************* -"Packs" coefficients and creates linear model in ALGLIB format (LRUnpack -reversed). +This function uses two different dataset formats - one for regression +networks, another one for classification networks. 
-INPUT PARAMETERS: - V - coefficients, array[0..NVars] - NVars - number of independent variables +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs -OUTPUT PAREMETERS: - LM - linear model. +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey + Copyright 08.01.2009 by Bochkanov Sergey *************************************************************************/ -void lrpack(const real_1d_array &v, const ae_int_t nvars, linearmodel &lm) +double mlpavgce(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::lrpack(const_cast(v.c_ptr()), nvars, const_cast(lm.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpavgce(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Procesing +Average cross-entropy (in bits per element) on the test set given by +sparse matrix. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - LM - linear model - X - input vector, array[0..NVars-1]. + Network - neural network; + XY - training set, see below for information on the + training set format. This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. + NPoints - points count, >=0. -Result: - value of linear model regression estimate +RESULT: +CrossEntropy/(NPoints*LN(2)). +Zero if network solves regression task. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. 
+ +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -- ALGLIB -- - Copyright 03.09.2008 by Bochkanov Sergey + Copyright 9.08.2012 by Bochkanov Sergey *************************************************************************/ -double lrprocess(const linearmodel &lm, const real_1d_array &x) +double mlpavgcesparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::lrprocess(const_cast(lm.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpavgcesparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -RMS error on the test set +RMS error on the test set given. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - LM - linear model - XY - test set - NPoints - test set size + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. RESULT: - root mean square error. +Root mean square error. Its meaning for regression task is obvious. As for +classification task, RMS error means error when estimating posterior +probabilities. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. 
+ +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey + Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -double lrrmserror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints) +double mlprmserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::lrrmserror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlprmserror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Average error on the test set +RMS error on the test set given by sparse matrix. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - LM - linear model - XY - test set - NPoints - test set size + Network - neural network; + XY - training set, see below for information on the + training set format. This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. + NPoints - points count, >=0. RESULT: - average error. +Root mean square error. Its meaning for regression task is obvious. As for +classification task, RMS error means error when estimating posterior +probabilities. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. 
+ +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey + Copyright 09.08.2012 by Bochkanov Sergey *************************************************************************/ -double lravgerror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints) +double mlprmserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::lravgerror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlprmserrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -RMS error on the test set +Average absolute error on the test set. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - LM - linear model - XY - test set - NPoints - test set size + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. RESULT: - average relative error. +Its meaning for regression task is obvious. As for classification task, it +means average error when estimating posterior probabilities. + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. 
+ +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey + Copyright 11.03.2008 by Bochkanov Sergey *************************************************************************/ -double lravgrelerror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints) +double mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::lravgrelerror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpavgerror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Filters: simple moving averages (unsymmetric). +Average absolute error on the test set given by sparse matrix. -This filter replaces array by results of SMA(K) filter. SMA(K) is defined -as filter which averages at most K previous points (previous - not points -AROUND central point) - or less, in case of the first K-1 points. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - X - array[N], array to process. It can be larger than N, - in this case only first N points are processed. - N - points count, N>=0 - K - K>=1 (K can be larger than N , such cases will be - correctly handled). Window width. K=1 corresponds to - identity transformation (nothing changes). + Network - neural network; + XY - training set, see below for information on the + training set format. This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. + NPoints - points count, >=0. 
-OUTPUT PARAMETERS: - X - array, whose first N elements were processed with SMA(K) +RESULT: +Its meaning for regression task is obvious. As for classification task, it +means average error when estimating posterior probabilities. -NOTE 1: this function uses efficient in-place algorithm which does not - allocate temporary arrays. +DATASET FORMAT: -NOTE 2: this algorithm makes only one pass through array and uses running - sum to speed-up calculation of the averages. Additional measures - are taken to ensure that running sum on a long sequence of zero - elements will be correctly reset to zero even in the presence of - round-off error. +This function uses two different dataset formats - one for regression +networks, another one for classification networks. -NOTE 3: this is unsymmetric version of the algorithm, which does NOT - averages points after the current one. Only X[i], X[i-1], ... are - used when calculating new value of X[i]. We should also note that - this algorithm uses BOTH previous points and current one, i.e. - new value of X[i] depends on BOTH previous point and X[i] itself. +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -- ALGLIB -- - Copyright 25.10.2011 by Bochkanov Sergey + Copyright 09.08.2012 by Bochkanov Sergey *************************************************************************/ -void filtersma(real_1d_array &x, const ae_int_t n, const ae_int_t k) +double mlpavgerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::filtersma(const_cast(x.c_ptr()), n, k, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpavgerrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Filters: simple moving averages (unsymmetric). +Average relative error on the test set. -This filter replaces array by results of SMA(K) filter. SMA(K) is defined -as filter which averages at most K previous points (previous - not points -AROUND central point) - or less, in case of the first K-1 points. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! 
* multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - X - array[N], array to process. It can be larger than N, - in this case only first N points are processed. - N - points count, N>=0 - K - K>=1 (K can be larger than N , such cases will be - correctly handled). Window width. K=1 corresponds to - identity transformation (nothing changes). + Network - neural network; + XY - training set, see below for information on the + training set format; + NPoints - points count. -OUTPUT PARAMETERS: - X - array, whose first N elements were processed with SMA(K) +RESULT: +Its meaning for regression task is obvious. As for classification task, it +means average relative error when estimating posterior probability of +belonging to the correct class. -NOTE 1: this function uses efficient in-place algorithm which does not - allocate temporary arrays. +DATASET FORMAT: -NOTE 2: this algorithm makes only one pass through array and uses running - sum to speed-up calculation of the averages. Additional measures - are taken to ensure that running sum on a long sequence of zero - elements will be correctly reset to zero even in the presence of - round-off error. +This function uses two different dataset formats - one for regression +networks, another one for classification networks. -NOTE 3: this is unsymmetric version of the algorithm, which does NOT - averages points after the current one. Only X[i], X[i-1], ... are - used when calculating new value of X[i]. We should also note that - this algorithm uses BOTH previous points and current one, i.e. - new value of X[i] depends on BOTH previous point and X[i] itself. +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). 
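A minimal usage sketch of the dense dataset layout documented above (regression rows hold NIn inputs followed by NOut outputs; classification rows hold NIn inputs plus one class column). The network geometry, the sample values and the "dataanalysis.h" include path are illustrative assumptions, not taken from this diff.

    #include <cstdio>
    #include "dataanalysis.h"   // assumed ALGLIB header providing the MLP wrappers
    using namespace alglib;

    int main()
    {
        // Classification dataset: NIn=2 inputs, NClasses=3, so each row is
        // [in0, in1, class] with the class number stored in the last column.
        real_2d_array xy = "[[0.1,0.2,0],[0.9,0.8,2],[0.4,0.6,1],[0.5,0.5,1]]";

        // 2 inputs, 5 hidden neurons, 3 SOFTMAX outputs (classifier network).
        multilayerperceptron net;
        mlpcreatec1(2, 5, 3, net);

        // Average relative error over the 4 rows of the dataset above.
        double err = mlpavgrelerror(net, xy, 4);
        printf("avg.rel.error = %.6f\n", err);
        return 0;
    }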
-- ALGLIB -- - Copyright 25.10.2011 by Bochkanov Sergey + Copyright 11.03.2008 by Bochkanov Sergey *************************************************************************/ -void filtersma(real_1d_array &x, const ae_int_t k) +double mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; - ae_int_t n; - - n = x.length(); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::filtersma(const_cast(x.c_ptr()), n, k, &_alglib_env_state); - - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpavgrelerror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Filters: exponential moving averages. +Average relative error on the test set given by sparse matrix. -This filter replaces array by results of EMA(alpha) filter. EMA(alpha) is -defined as filter which replaces X[] by S[]: - S[0] = X[0] - S[t] = alpha*X[t] + (1-alpha)*S[t-1] + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - X - array[N], array to process. It can be larger than N, - in this case only first N points are processed. - N - points count, N>=0 - alpha - 0=0. -OUTPUT PARAMETERS: - X - array, whose first N elements were processed - with EMA(alpha) +RESULT: +Its meaning for regression task is obvious. As for classification task, it +means average relative error when estimating posterior probability of +belonging to the correct class. -NOTE 1: this function uses efficient in-place algorithm which does not - allocate temporary arrays. +DATASET FORMAT: -NOTE 2: this algorithm uses BOTH previous points and current one, i.e. - new value of X[i] depends on BOTH previous point and X[i] itself. +This function uses two different dataset formats - one for regression +networks, another one for classification networks. -NOTE 3: technical analytis users quite often work with EMA coefficient - expressed in DAYS instead of fractions. If you want to calculate - EMA(N), where N is a number of days, you can use alpha=2/(N+1). 
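The sparse variants introduced in these hunks (mlprmserrorsparse, mlpavgerrorsparse, ...) require the dataset to be passed as a CRS-format sparsematrix. A minimal sketch with made-up sizes and values; the include paths and network shape are assumptions.

    #include <cstdio>
    #include "linalg.h"         // assumed header for the sparsematrix helpers
    #include "dataanalysis.h"   // assumed header for the MLP wrappers
    using namespace alglib;

    int main()
    {
        // Regression dataset with NIn=2, NOut=1: rows are [in0, in1, out].
        sparsematrix xy;
        sparsecreate(3, 3, xy);          // 3 samples, 2 inputs + 1 output column
        sparseset(xy, 0, 0, 1.0); sparseset(xy, 0, 2, 0.5);
        sparseset(xy, 1, 1, 2.0); sparseset(xy, 1, 2, 1.5);
        sparseset(xy, 2, 0, 0.5); sparseset(xy, 2, 1, 0.5); sparseset(xy, 2, 2, 1.0);
        sparseconverttocrs(xy);          // the error functions insist on CRS storage

        multilayerperceptron net;
        mlpcreate1(2, 4, 1, net);        // 2 inputs, 4 hidden neurons, 1 output

        printf("rms = %.6f, avg = %.6f\n",
               mlprmserrorsparse(net, xy, 3),
               mlpavgerrorsparse(net, xy, 3));
        return 0;
    }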
+For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -- ALGLIB -- - Copyright 25.10.2011 by Bochkanov Sergey + Copyright 09.08.2012 by Bochkanov Sergey *************************************************************************/ -void filterema(real_1d_array &x, const ae_int_t n, const double alpha) +double mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::filterema(const_cast(x.c_ptr()), n, alpha, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpavgrelerrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Filters: exponential moving averages. - -This filter replaces array by results of EMA(alpha) filter. EMA(alpha) is -defined as filter which replaces X[] by S[]: - S[0] = X[0] - S[t] = alpha*X[t] + (1-alpha)*S[t-1] +Gradient calculation INPUT PARAMETERS: - X - array[N], array to process. It can be larger than N, - in this case only first N points are processed. - N - points count, N>=0 - alpha - 0(x.c_ptr()), n, alpha, &_alglib_env_state); - - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpgrad(const_cast(network.c_ptr()), const_cast(x.c_ptr()), const_cast(desiredy.c_ptr()), &e, const_cast(grad.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Filters: linear regression moving averages. - -This filter replaces array by results of LRMA(K) filter. - -LRMA(K) is defined as filter which, for each data point, builds linear -regression model using K prevous points (point itself is included in -these K points) and calculates value of this linear model at the point in -question. +Gradient calculation (natural error function is used) INPUT PARAMETERS: - X - array[N], array to process. 
It can be larger than N, - in this case only first N points are processed. - N - points count, N>=0 - K - K>=1 (K can be larger than N , such cases will be - correctly handled). Window width. K=1 corresponds to - identity transformation (nothing changes). + Network - network initialized with one of the network creation funcs + X - input vector, length of array must be at least NIn + DesiredY- desired outputs, length of array must be at least NOut + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. OUTPUT PARAMETERS: - X - array, whose first N elements were processed with SMA(K) - -NOTE 1: this function uses efficient in-place algorithm which does not - allocate temporary arrays. - -NOTE 2: this algorithm makes only one pass through array and uses running - sum to speed-up calculation of the averages. Additional measures - are taken to ensure that running sum on a long sequence of zero - elements will be correctly reset to zero even in the presence of - round-off error. - -NOTE 3: this is unsymmetric version of the algorithm, which does NOT - averages points after the current one. Only X[i], X[i-1], ... are - used when calculating new value of X[i]. We should also note that - this algorithm uses BOTH previous points and current one, i.e. - new value of X[i] depends on BOTH previous point and X[i] itself. + E - error function, sum-of-squares for regression networks, + cross-entropy for classification networks. + Grad - gradient of E with respect to weights of network, array[WCount] -- ALGLIB -- - Copyright 25.10.2011 by Bochkanov Sergey + Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -void filterlrma(real_1d_array &x, const ae_int_t n, const ae_int_t k) +void mlpgradn(const multilayerperceptron &network, const real_1d_array &x, const real_1d_array &desiredy, double &e, real_1d_array &grad, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::filterlrma(const_cast(x.c_ptr()), n, k, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpgradn(const_cast(network.c_ptr()), const_cast(x.c_ptr()), const_cast(desiredy.c_ptr()), &e, const_cast(grad.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Filters: linear regression moving averages. - -This filter replaces array by results of LRMA(K) filter. +Batch gradient calculation for a set of inputs/outputs -LRMA(K) is defined as filter which, for each data point, builds linear -regression model using K prevous points (point itself is included in -these K points) and calculates value of this linear model at the point in -question. + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! 
of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - X - array[N], array to process. It can be larger than N, - in this case only first N points are processed. - N - points count, N>=0 - K - K>=1 (K can be larger than N , such cases will be - correctly handled). Window width. K=1 corresponds to - identity transformation (nothing changes). + Network - network initialized with one of the network creation funcs + XY - original dataset in dense format; one sample = one row: + * first NIn columns contain inputs, + * for regression problem, next NOut columns store + desired outputs. + * for classification problem, next column (just one!) + stores class number. + SSize - number of elements in XY + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. OUTPUT PARAMETERS: - X - array, whose first N elements were processed with SMA(K) - -NOTE 1: this function uses efficient in-place algorithm which does not - allocate temporary arrays. - -NOTE 2: this algorithm makes only one pass through array and uses running - sum to speed-up calculation of the averages. Additional measures - are taken to ensure that running sum on a long sequence of zero - elements will be correctly reset to zero even in the presence of - round-off error. - -NOTE 3: this is unsymmetric version of the algorithm, which does NOT - averages points after the current one. Only X[i], X[i-1], ... are - used when calculating new value of X[i]. We should also note that - this algorithm uses BOTH previous points and current one, i.e. - new value of X[i] depends on BOTH previous point and X[i] itself. 
+ E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) + Grad - gradient of E with respect to weights of network, array[WCount] -- ALGLIB -- - Copyright 25.10.2011 by Bochkanov Sergey + Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -void filterlrma(real_1d_array &x, const ae_int_t k) +void mlpgradbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; - ae_int_t n; - - n = x.length(); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::filterlrma(const_cast(x.c_ptr()), n, k, &_alglib_env_state); - - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpgradbatch(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Multiclass Fisher LDA - -Subroutine finds coefficients of linear combination which optimally separates -training set on classes. - -COMMERCIAL EDITION OF ALGLIB: +Batch gradient calculation for a set of inputs/outputs given by sparse +matrices - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. Best results are achieved for high-dimensional problems - ! (NVars is at least 256). - ! - ! Multithreading is used to accelerate initial phase of LDA, which - ! includes calculation of products of large matrices. Again, for best - ! efficiency problem must be high-dimensional. + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - XY - training set, array[0..NPoints-1,0..NVars]. - First NVars columns store values of independent - variables, next column stores number of class (from 0 - to NClasses-1) which dataset element belongs to. Fractional - values are rounded to nearest integer. 
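A minimal sketch of the dense batch-gradient call documented above: E comes back as the sum-of-squares error and Grad is (re)allocated to WCount entries, so the same array can be reused across calls. Dataset values and the include path are illustrative assumptions.

    #include <cstdio>
    #include "dataanalysis.h"   // assumed ALGLIB header
    using namespace alglib;

    int main()
    {
        // Regression dataset, NIn=2, NOut=1: each row is [in0, in1, target].
        real_2d_array xy = "[[0.0,0.0,0.0],[1.0,0.0,1.0],[0.0,1.0,1.0],[1.0,1.0,0.0]]";

        multilayerperceptron net;
        mlpcreate1(2, 3, 1, net);

        double e;
        real_1d_array grad;              // may be preallocated and reused between calls
        mlpgradbatch(net, xy, 4, e, grad);
        printf("E = %.6f, dE/dw[0] = %.6f\n", e, grad[0]);
        return 0;
    }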
- NPoints - training set size, NPoints>=0 - NVars - number of independent variables, NVars>=1 - NClasses - number of classes, NClasses>=2 - + Network - network initialized with one of the network creation funcs + XY - original dataset in sparse format; one sample = one row: + * MATRIX MUST BE STORED IN CRS FORMAT + * first NIn columns contain inputs. + * for regression problem, next NOut columns store + desired outputs. + * for classification problem, next column (just one!) + stores class number. + SSize - number of elements in XY + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. OUTPUT PARAMETERS: - Info - return code: - * -4, if internal EVD subroutine hasn't converged - * -2, if there is a point with class number - outside of [0..NClasses-1]. - * -1, if incorrect parameters was passed (NPoints<0, - NVars<1, NClasses<2) - * 1, if task has been solved - * 2, if there was a multicollinearity in training set, - but task has been solved. - W - linear combination coefficients, array[0..NVars-1] + E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) + Grad - gradient of E with respect to weights of network, array[WCount] -- ALGLIB -- - Copyright 31.05.2008 by Bochkanov Sergey + Copyright 26.07.2012 by Bochkanov Sergey *************************************************************************/ -void fisherlda(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_1d_array &w) +void mlpgradbatchsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t ssize, double &e, real_1d_array &grad, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::fisherlda(const_cast(xy.c_ptr()), npoints, nvars, nclasses, &info, const_cast(w.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpgradbatchsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -N-dimensional multiclass Fisher LDA - -Subroutine finds coefficients of linear combinations which optimally separates -training set on classes. It returns N-dimensional basis whose vector are sorted -by quality of training set separation (in descending order). - -COMMERCIAL EDITION OF ALGLIB: +Batch gradient calculation for a subset of dataset - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multithreading support - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! 
problem size and "baseline" ALGLIB edition which is used for - ! comparison. Best results are achieved for high-dimensional problems - ! (NVars is at least 256). + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Multithreading is used to accelerate initial phase of LDA, which - ! includes calculation of products of large matrices. Again, for best - ! efficiency problem must be high-dimensional. - ! - ! Generally, commercial ALGLIB is several times faster than open-source - ! generic C edition, and many times faster than open-source C# edition. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - XY - training set, array[0..NPoints-1,0..NVars]. - First NVars columns store values of independent - variables, next column stores number of class (from 0 - to NClasses-1) which dataset element belongs to. Fractional - values are rounded to nearest integer. - NPoints - training set size, NPoints>=0 - NVars - number of independent variables, NVars>=1 - NClasses - number of classes, NClasses>=2 - + Network - network initialized with one of the network creation funcs + XY - original dataset in dense format; one sample = one row: + * first NIn columns contain inputs, + * for regression problem, next NOut columns store + desired outputs. + * for classification problem, next column (just one!) + stores class number. + SetSize - real size of XY, SetSize>=0; + Idx - subset of SubsetSize elements, array[SubsetSize]: + * Idx[I] stores row index in the original dataset which is + given by XY. Gradient is calculated with respect to rows + whose indexes are stored in Idx[]. + * Idx[] must store correct indexes; this function throws + an exception in case incorrect index (less than 0 or + larger than rows(XY)) is given + * Idx[] may store indexes in any order and even with + repetitions. + SubsetSize- number of elements in Idx[] array: + * positive value means that subset given by Idx[] is processed + * zero value results in zero gradient + * negative value means that full dataset is processed + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. OUTPUT PARAMETERS: - Info - return code: - * -4, if internal EVD subroutine hasn't converged - * -2, if there is a point with class number - outside of [0..NClasses-1]. - * -1, if incorrect parameters was passed (NPoints<0, - NVars<1, NClasses<2) - * 1, if task has been solved - * 2, if there was a multicollinearity in training set, - but task has been solved. 
- W - basis, array[0..NVars-1,0..NVars-1] - columns of matrix stores basis vectors, sorted by - quality of training set separation (in descending order) + E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) + Grad - gradient of E with respect to weights of network, + array[WCount] -- ALGLIB -- - Copyright 31.05.2008 by Bochkanov Sergey + Copyright 26.07.2012 by Bochkanov Sergey *************************************************************************/ -void fisherldan(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_2d_array &w) +void mlpgradbatchsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::fisherldan(const_cast(xy.c_ptr()), npoints, nvars, nclasses, &info, const_cast(w.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpgradbatchsubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(idx.c_ptr()), subsetsize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +Batch gradient calculation for a set of inputs/outputs for a subset of +dataset given by set of indexes. -void smp_fisherldan(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_2d_array &w) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::_pexec_fisherldan(const_cast(xy.c_ptr()), npoints, nvars, nclasses, &info, const_cast(w.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -Model's errors: - * RelCLSError - fraction of misclassified cases. - * AvgCE - acerage cross-entropy - * RMSError - root-mean-square error - * AvgError - average error - * AvgRelError - average relative error - -NOTE 1: RelCLSError/AvgCE are zero on regression problems. 
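The subset variant above takes an explicit index array: indexes may repeat, SubsetSize=0 yields a zero gradient, and SubsetSize<0 falls back to the full dataset. A minimal sketch under the same assumptions as the previous examples.

    #include <cstdio>
    #include "dataanalysis.h"   // assumed ALGLIB header
    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[0.0,0.0,0.0],[1.0,0.0,1.0],[0.0,1.0,1.0],[1.0,1.0,0.0]]";

        multilayerperceptron net;
        mlpcreate1(2, 3, 1, net);

        // Gradient over rows 0, 2 and (again) 2 only; repetitions are allowed.
        integer_1d_array idx = "[0,2,2]";
        double e;
        real_1d_array grad;
        mlpgradbatchsubset(net, xy, 4, idx, 3, e, grad);

        // Passing SubsetSize=-1 instead processes the whole 4-row dataset.
        mlpgradbatchsubset(net, xy, 4, idx, -1, e, grad);
        printf("E(full) = %.6f\n", e);
        return 0;
    }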
- -NOTE 2: on classification problems RMSError/AvgError/AvgRelError contain - errors in prediction of posterior probabilities -*************************************************************************/ -_modelerrors_owner::_modelerrors_owner() -{ - p_struct = (alglib_impl::modelerrors*)alglib_impl::ae_malloc(sizeof(alglib_impl::modelerrors), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_modelerrors_init(p_struct, NULL); -} - -_modelerrors_owner::_modelerrors_owner(const _modelerrors_owner &rhs) -{ - p_struct = (alglib_impl::modelerrors*)alglib_impl::ae_malloc(sizeof(alglib_impl::modelerrors), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_modelerrors_init_copy(p_struct, const_cast(rhs.p_struct), NULL); -} - -_modelerrors_owner& _modelerrors_owner::operator=(const _modelerrors_owner &rhs) -{ - if( this==&rhs ) - return *this; - alglib_impl::_modelerrors_clear(p_struct); - alglib_impl::_modelerrors_init_copy(p_struct, const_cast(rhs.p_struct), NULL); - return *this; -} - -_modelerrors_owner::~_modelerrors_owner() -{ - alglib_impl::_modelerrors_clear(p_struct); - ae_free(p_struct); -} - -alglib_impl::modelerrors* _modelerrors_owner::c_ptr() -{ - return p_struct; -} - -alglib_impl::modelerrors* _modelerrors_owner::c_ptr() const -{ - return const_cast(p_struct); -} -modelerrors::modelerrors() : _modelerrors_owner() ,relclserror(p_struct->relclserror),avgce(p_struct->avgce),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror) -{ -} - -modelerrors::modelerrors(const modelerrors &rhs):_modelerrors_owner(rhs) ,relclserror(p_struct->relclserror),avgce(p_struct->avgce),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror) -{ -} - -modelerrors& modelerrors::operator=(const modelerrors &rhs) -{ - if( this==&rhs ) - return *this; - _modelerrors_owner::operator=(rhs); - return *this; -} - -modelerrors::~modelerrors() -{ -} - - -/************************************************************************* - -*************************************************************************/ -_multilayerperceptron_owner::_multilayerperceptron_owner() -{ - p_struct = (alglib_impl::multilayerperceptron*)alglib_impl::ae_malloc(sizeof(alglib_impl::multilayerperceptron), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_multilayerperceptron_init(p_struct, NULL); -} - -_multilayerperceptron_owner::_multilayerperceptron_owner(const _multilayerperceptron_owner &rhs) -{ - p_struct = (alglib_impl::multilayerperceptron*)alglib_impl::ae_malloc(sizeof(alglib_impl::multilayerperceptron), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_multilayerperceptron_init_copy(p_struct, const_cast(rhs.p_struct), NULL); -} - -_multilayerperceptron_owner& _multilayerperceptron_owner::operator=(const _multilayerperceptron_owner &rhs) -{ - if( this==&rhs ) - return *this; - alglib_impl::_multilayerperceptron_clear(p_struct); - alglib_impl::_multilayerperceptron_init_copy(p_struct, const_cast(rhs.p_struct), NULL); - return *this; -} - -_multilayerperceptron_owner::~_multilayerperceptron_owner() -{ - alglib_impl::_multilayerperceptron_clear(p_struct); - ae_free(p_struct); -} - -alglib_impl::multilayerperceptron* _multilayerperceptron_owner::c_ptr() -{ - return p_struct; -} - -alglib_impl::multilayerperceptron* _multilayerperceptron_owner::c_ptr() const -{ - return const_cast(p_struct); -} 
-multilayerperceptron::multilayerperceptron() : _multilayerperceptron_owner() -{ -} - -multilayerperceptron::multilayerperceptron(const multilayerperceptron &rhs):_multilayerperceptron_owner(rhs) -{ -} - -multilayerperceptron& multilayerperceptron::operator=(const multilayerperceptron &rhs) -{ - if( this==&rhs ) - return *this; - _multilayerperceptron_owner::operator=(rhs); - return *this; -} + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -multilayerperceptron::~multilayerperceptron() -{ -} +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - original dataset in sparse format; one sample = one row: + * MATRIX MUST BE STORED IN CRS FORMAT + * first NIn columns contain inputs, + * for regression problem, next NOut columns store + desired outputs. + * for classification problem, next column (just one!) + stores class number. + SetSize - real size of XY, SetSize>=0; + Idx - subset of SubsetSize elements, array[SubsetSize]: + * Idx[I] stores row index in the original dataset which is + given by XY. Gradient is calculated with respect to rows + whose indexes are stored in Idx[]. + * Idx[] must store correct indexes; this function throws + an exception in case incorrect index (less than 0 or + larger than rows(XY)) is given + * Idx[] may store indexes in any order and even with + repetitions. + SubsetSize- number of elements in Idx[] array: + * positive value means that subset given by Idx[] is processed + * zero value results in zero gradient + * negative value means that full dataset is processed + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. +OUTPUT PARAMETERS: + E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) + Grad - gradient of E with respect to weights of network, + array[WCount] -/************************************************************************* -This function serializes data structure to string. +NOTE: when SubsetSize<0 is used full dataset by call MLPGradBatchSparse + function. -Important properties of s_out: -* it contains alphanumeric characters, dots, underscores, minus signs -* these symbols are grouped into words, which are separated by spaces - and Windows-style (CR+LF) newlines -* although serializer uses spaces and CR+LF as separators, you can - replace any separator character by arbitrary combination of spaces, - tabs, Windows or Unix newlines. It allows flexible reformatting of - the string in case you want to include it into text or XML file. - But you should not insert separators into the middle of the "words" - nor you should change case of letters. -* s_out can be freely moved between 32-bit and 64-bit systems, little - and big endian machines, and so on. You can serialize structure on - 32-bit machine and unserialize it on 64-bit one (or vice versa), or - serialize it on SPARC and unserialize on x86. You can also - serialize it in C++ version of ALGLIB and unserialize in C# one, - and vice versa. 
+ -- ALGLIB -- + Copyright 26.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlpserialize(multilayerperceptron &obj, std::string &s_out) +void mlpgradbatchsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad, const xparams _xparams) { - alglib_impl::ae_state state; - alglib_impl::ae_serializer serializer; - alglib_impl::ae_int_t ssize; - - alglib_impl::ae_state_init(&state); - try - { - alglib_impl::ae_serializer_init(&serializer); - alglib_impl::ae_serializer_alloc_start(&serializer); - alglib_impl::mlpalloc(&serializer, obj.c_ptr(), &state); - ssize = alglib_impl::ae_serializer_get_alloc_size(&serializer); - s_out.clear(); - s_out.reserve((size_t)(ssize+1)); - alglib_impl::ae_serializer_sstart_str(&serializer, &s_out); - alglib_impl::mlpserialize(&serializer, obj.c_ptr(), &state); - alglib_impl::ae_serializer_stop(&serializer); - if( s_out.length()>(size_t)ssize ) - throw ap_error("ALGLIB: serialization integrity error"); - alglib_impl::ae_serializer_clear(&serializer); - alglib_impl::ae_state_clear(&state); - } - catch(alglib_impl::ae_error_type) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - throw ap_error(state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpgradbatchsparsesubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(idx.c_ptr()), subsetsize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } + /************************************************************************* -This function unserializes data structure from string. -*************************************************************************/ -void mlpunserialize(std::string &s_in, multilayerperceptron &obj) -{ - alglib_impl::ae_state state; - alglib_impl::ae_serializer serializer; +Batch gradient calculation for a set of inputs/outputs +(natural error function is used) - alglib_impl::ae_state_init(&state); - try - { - alglib_impl::ae_serializer_init(&serializer); - alglib_impl::ae_serializer_ustart_str(&serializer, &s_in); - alglib_impl::mlpunserialize(&serializer, obj.c_ptr(), &state); - alglib_impl::ae_serializer_stop(&serializer); - alglib_impl::ae_serializer_clear(&serializer); - alglib_impl::ae_state_clear(&state); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(state.error_msg); - } -} +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - set of inputs/outputs; one sample = one row; + first NIn columns contain inputs, + next NOut columns - desired outputs. + SSize - number of elements in XY + Grad - possibly preallocated array. If size of array is smaller + than WCount, it will be reallocated. It is recommended to + reuse previously allocated array to reduce allocation + overhead. -/************************************************************************* -Creates neural network with NIn inputs, NOut outputs, without hidden -layers, with linear output layer. 
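The string serializer whose old wrapper bodies are removed above remains part of ALGLIB's public MLP interface; the serialized string is plain ASCII and portable between platforms. A minimal round-trip sketch; the include path is an assumption.

    #include <cstdio>
    #include <string>
    #include "dataanalysis.h"   // assumed ALGLIB header
    using namespace alglib;

    int main()
    {
        multilayerperceptron net;
        mlpcreate1(2, 3, 1, net);

        // Serialize to a portable ASCII string (endianness/word-size independent).
        std::string s;
        mlpserialize(net, s);

        // Restore into a fresh object; both now describe identical networks.
        multilayerperceptron restored;
        mlpunserialize(s, restored);
        printf("serialized length = %u bytes\n", (unsigned)s.size());
        return 0;
    }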
Network weights are filled with small -random values. +OUTPUT PARAMETERS: + E - error function, sum-of-squares for regression networks, + cross-entropy for classification networks. + Grad - gradient of E with respect to weights of network, array[WCount] -- ALGLIB -- Copyright 04.11.2007 by Bochkanov Sergey *************************************************************************/ -void mlpcreate0(const ae_int_t nin, const ae_int_t nout, multilayerperceptron &network) +void mlpgradnbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreate0(nin, nout, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpgradnbatch(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Same as MLPCreate0, but with one hidden layer (NHid neurons) with -non-linear activation function. Output layer is linear. +Batch Hessian calculation (natural error function) using R-algorithm. +Internal subroutine. -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 26.01.2008 by Bochkanov Sergey. + + Hessian calculation based on R-algorithm described in + "Fast Exact Multiplication by the Hessian", + B. A. Pearlmutter, + Neural Computation, 1994. *************************************************************************/ -void mlpcreate1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, multilayerperceptron &network) +void mlphessiannbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, real_2d_array &h, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreate1(nin, nhid, nout, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlphessiannbatch(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), const_cast(h.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Same as MLPCreate0, but with two hidden layers (NHid1 and NHid2 neurons) -with non-linear activation function. 
Output layer is linear. - $ALL +Batch Hessian calculation using R-algorithm. +Internal subroutine. -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 26.01.2008 by Bochkanov Sergey. + + Hessian calculation based on R-algorithm described in + "Fast Exact Multiplication by the Hessian", + B. A. Pearlmutter, + Neural Computation, 1994. *************************************************************************/ -void mlpcreate2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, multilayerperceptron &network) +void mlphessianbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, real_2d_array &h, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreate2(nin, nhid1, nhid2, nout, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlphessianbatch(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), const_cast(h.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Creates neural network with NIn inputs, NOut outputs, without hidden -layers with non-linear output layer. Network weights are filled with small -random values. - -Activation function of the output layer takes values: - - (B, +INF), if D>=0 +Calculation of all types of errors on subset of dataset. -or + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. - (-INF, B), if D<0. +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - original dataset; one sample = one row; + first NIn columns contain inputs, + next NOut columns - desired outputs. + SetSize - real size of XY, SetSize>=0; + Subset - subset of SubsetSize elements, array[SubsetSize]; + SubsetSize- number of elements in Subset[] array: + * if SubsetSize>0, rows of XY with indices Subset[0]... + ...Subset[SubsetSize-1] are processed + * if SubsetSize=0, zeros are returned + * if SubsetSize<0, entire dataset is processed; Subset[] + array is ignored in this case. +OUTPUT PARAMETERS: + Rep - it contains all type of errors. 
-- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey + Copyright 04.09.2012 by Bochkanov Sergey *************************************************************************/ -void mlpcreateb0(const ae_int_t nin, const ae_int_t nout, const double b, const double d, multilayerperceptron &network) +void mlpallerrorssubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreateb0(nin, nout, b, d, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpallerrorssubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Same as MLPCreateB0 but with non-linear hidden layer. +Calculation of all types of errors on subset of dataset. - -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey -*************************************************************************/ -void mlpcreateb1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double b, const double d, multilayerperceptron &network) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mlpcreateb1(nin, nhid, nout, b, d, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. -/************************************************************************* -Same as MLPCreateB0 but with two non-linear hidden layers. +INPUT PARAMETERS: + Network - network initialized with one of the network creation funcs + XY - original dataset given by sparse matrix; + one sample = one row; + first NIn columns contain inputs, + next NOut columns - desired outputs. + SetSize - real size of XY, SetSize>=0; + Subset - subset of SubsetSize elements, array[SubsetSize]; + SubsetSize- number of elements in Subset[] array: + * if SubsetSize>0, rows of XY with indices Subset[0]... + ...Subset[SubsetSize-1] are processed + * if SubsetSize=0, zeros are returned + * if SubsetSize<0, entire dataset is processed; Subset[] + array is ignored in this case. 
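mlpallerrorssubset fills a modelerrors report whose fields match the ones visible in the removed _modelerrors_owner boilerplate above (relclserror, avgce, rmserror, avgerror, avgrelerror). A minimal sketch; dataset values, network shape and include path are assumptions.

    #include <cstdio>
    #include "dataanalysis.h"   // assumed ALGLIB header
    using namespace alglib;

    int main()
    {
        // Classification dataset: NIn=2, NClasses=2, last column is the class.
        real_2d_array xy = "[[0.1,0.2,0],[0.8,0.9,1],[0.2,0.1,0],[0.9,0.7,1]]";

        multilayerperceptron net;
        mlpcreatec1(2, 4, 2, net);

        // Process all four rows explicitly; SubsetSize=-1 would do the same
        // while ignoring the Subset[] array entirely.
        integer_1d_array subset = "[0,1,2,3]";
        modelerrors rep;
        mlpallerrorssubset(net, xy, 4, subset, 4, rep);
        printf("cls.err=%.3f avgce=%.3f rms=%.3f avg=%.3f avgrel=%.3f\n",
               rep.relclserror, rep.avgce, rep.rmserror,
               rep.avgerror, rep.avgrelerror);
        return 0;
    }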
- -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey -*************************************************************************/ -void mlpcreateb2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double b, const double d, multilayerperceptron &network) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mlpcreateb2(nin, nhid1, nhid2, nout, b, d, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} +OUTPUT PARAMETERS: + Rep - it contains all type of errors. -/************************************************************************* -Creates neural network with NIn inputs, NOut outputs, without hidden -layers with non-linear output layer. Network weights are filled with small -random values. Activation function of the output layer takes values [A,B]. -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey + Copyright 04.09.2012 by Bochkanov Sergey *************************************************************************/ -void mlpcreater0(const ae_int_t nin, const ae_int_t nout, const double a, const double b, multilayerperceptron &network) +void mlpallerrorssparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreater0(nin, nout, a, b, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpallerrorssparsesubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Same as MLPCreateR0, but with non-linear hidden layer. +Error of the neural network on subset of dataset. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format; + SetSize - real size of XY, SetSize>=0; + Subset - subset of SubsetSize elements, array[SubsetSize]; + SubsetSize- number of elements in Subset[] array: + * if SubsetSize>0, rows of XY with indices Subset[0]... 
+ ...Subset[SubsetSize-1] are processed + * if SubsetSize=0, zeros are returned + * if SubsetSize<0, entire dataset is processed; Subset[] + array is ignored in this case. + +RESULT: + sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey + Copyright 04.09.2012 by Bochkanov Sergey *************************************************************************/ -void mlpcreater1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double a, const double b, multilayerperceptron &network) +double mlperrorsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mlpcreater1(nin, nhid, nout, a, b, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlperrorsubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Same as MLPCreateR0, but with two non-linear hidden layers. +Error of the neural network on subset of sparse dataset. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + Network - neural network; + XY - training set, see below for information on the + training set format. This function checks correctness + of the dataset (no NANs/INFs, class numbers are + correct) and throws exception when incorrect dataset + is passed. Sparse matrix must use CRS format for + storage. 
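mlperrorsubset, by contrast, returns only the sum-of-squares half-error SUM(sqr(y[i]-desired_y[i])/2) on the chosen rows rather than a full report. A minimal sketch under the same assumptions.

    #include <cstdio>
    #include "dataanalysis.h"   // assumed ALGLIB header
    using namespace alglib;

    int main()
    {
        // Regression dataset, NIn=1, NOut=1: rows are [x, y].
        real_2d_array xy = "[[0.0,0.0],[0.5,0.25],[1.0,1.0]]";

        multilayerperceptron net;
        mlpcreate1(1, 3, 1, net);

        integer_1d_array subset = "[0,2]";       // first and last sample only
        double e = mlperrorsubset(net, xy, 3, subset, 2);
        printf("SSE/2 on subset = %.6f\n", e);
        return 0;
    }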
+ SetSize - real size of XY, SetSize>=0; + it is used when SubsetSize<0; + Subset - subset of SubsetSize elements, array[SubsetSize]; + SubsetSize- number of elements in Subset[] array: + * if SubsetSize>0, rows of XY with indices Subset[0]... + ...Subset[SubsetSize-1] are processed + * if SubsetSize=0, zeros are returned + * if SubsetSize<0, entire dataset is processed; Subset[] + array is ignored in this case. + +RESULT: + sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) + +DATASET FORMAT: + +This function uses two different dataset formats - one for regression +networks, another one for classification networks. + +For regression networks with NIn inputs and NOut outputs following dataset +format is used: +* dataset is given by NPoints*(NIn+NOut) matrix +* each row corresponds to one example +* first NIn columns are inputs, next NOut columns are outputs + +For classification networks with NIn inputs and NClasses clases following +dataset format is used: +* dataset is given by NPoints*(NIn+1) matrix +* each row corresponds to one example +* first NIn columns are inputs, last column stores class number (from 0 to + NClasses-1). -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey + Copyright 04.09.2012 by Bochkanov Sergey *************************************************************************/ -void mlpcreater2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double a, const double b, multilayerperceptron &network) +double mlperrorsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreater2(nin, nhid1, nhid2, nout, a, b, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlperrorsparsesubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif +#if defined(AE_COMPILE_LDA) || !defined(AE_PARTIAL_BUILD) /************************************************************************* -Creates classifier network with NIn inputs and NOut possible classes. -Network contains no hidden layers and linear output layer with SOFTMAX- -normalization (so outputs sums up to 1.0 and converge to posterior -probabilities). +Multiclass Fisher LDA + +Subroutine finds coefficients of linear combination which optimally separates +training set on classes. + +COMMERCIAL EDITION OF ALGLIB: + + ! Commercial version of ALGLIB includes two important improvements of + ! this function, which can be used from C++ and C#: + ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) + ! * multithreading support + ! + ! Intel MKL gives approximately constant (with respect to number of + ! 
worker threads) acceleration factor which depends on CPU being used, + ! problem size and "baseline" ALGLIB edition which is used for + ! comparison. Best results are achieved for high-dimensional problems + ! (NVars is at least 256). + ! + ! Multithreading is used to accelerate initial phase of LDA, which + ! includes calculation of products of large matrices. Again, for best + ! efficiency problem must be high-dimensional. + ! + ! Generally, commercial ALGLIB is several times faster than open-source + ! generic C edition, and many times faster than open-source C# edition. + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + XY - training set, array[0..NPoints-1,0..NVars]. + First NVars columns store values of independent + variables, next column stores number of class (from 0 + to NClasses-1) which dataset element belongs to. Fractional + values are rounded to nearest integer. + NPoints - training set size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + NClasses - number of classes, NClasses>=2 + + +OUTPUT PARAMETERS: + Info - return code: + * -4, if internal EVD subroutine hasn't converged + * -2, if there is a point with class number + outside of [0..NClasses-1]. + * -1, if incorrect parameters was passed (NPoints<0, + NVars<1, NClasses<2) + * 1, if task has been solved + * 2, if there was a multicollinearity in training set, + but task has been solved. + W - linear combination coefficients, array[0..NVars-1] -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 31.05.2008 by Bochkanov Sergey *************************************************************************/ -void mlpcreatec0(const ae_int_t nin, const ae_int_t nout, multilayerperceptron &network) +void fisherlda(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_1d_array &w, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreatec0(nin, nout, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::fisherlda(const_cast(xy.c_ptr()), npoints, nvars, nclasses, &info, const_cast(w.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Same as MLPCreateC0, but with one non-linear hidden layer. +N-dimensional multiclass Fisher LDA - -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey +Subroutine finds coefficients of linear combinations which optimally separates +training set on classes. It returns N-dimensional basis whose vector are sorted +by quality of training set separation (in descending order). + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! 
* high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + XY - training set, array[0..NPoints-1,0..NVars]. + First NVars columns store values of independent + variables, next column stores number of class (from 0 + to NClasses-1) which dataset element belongs to. Fractional + values are rounded to nearest integer. + NPoints - training set size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + NClasses - number of classes, NClasses>=2 + + +OUTPUT PARAMETERS: + Info - return code: + * -4, if internal EVD subroutine hasn't converged + * -2, if there is a point with class number + outside of [0..NClasses-1]. + * -1, if incorrect parameters was passed (NPoints<0, + NVars<1, NClasses<2) + * 1, if task has been solved + * 2, if there was a multicollinearity in training set, + but task has been solved. + W - basis, array[0..NVars-1,0..NVars-1] + columns of matrix stores basis vectors, sorted by + quality of training set separation (in descending order) + + -- ALGLIB -- + Copyright 31.05.2008 by Bochkanov Sergey *************************************************************************/ -void mlpcreatec1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, multilayerperceptron &network) +void fisherldan(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, real_2d_array &w, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreatec1(nin, nhid, nout, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::fisherldan(const_cast(xy.c_ptr()), npoints, nvars, nclasses, &info, const_cast(w.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif +#if defined(AE_COMPILE_SSA) || !defined(AE_PARTIAL_BUILD) /************************************************************************* -Same as MLPCreateC0, but with two non-linear hidden layers. +This object stores state of the SSA model. - -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey +You should use ALGLIB functions to work with this object. 
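A minimal illustrative sketch of how the Fisher LDA wrappers above (fisherlda, fisherldan) might be called; the header name, the defaulted trailing xparams argument, and the toy dataset are assumptions rather than anything mandated by this patch:

    #include <cstdio>
    #include "dataanalysis.h"      // assumed ALGLIB header providing fisherlda()

    using namespace alglib;

    int main()
    {
        // 4 samples, 2 independent variables, last column = class label (0 or 1)
        real_2d_array xy = "[[1.0,1.1,0],[1.2,0.9,0],[5.0,5.1,1],[4.9,5.2,1]]";
        ae_int_t info;
        real_1d_array w;

        // NPoints=4, NVars=2, NClasses=2; Info>0 indicates success (see return codes above)
        fisherlda(xy, 4, 2, 2, info, w);
        if( info>0 )
            printf("separating direction: %s\n", w.tostring(3).c_str());
        return 0;
    }

fisherldan() is called the same way, except that W comes back as a real_2d_array whose columns are the basis vectors.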
*************************************************************************/ -void mlpcreatec2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, multilayerperceptron &network) +_ssamodel_owner::_ssamodel_owner() { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreatec2(nin, nhid1, nhid2, nout, const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); + if( p_struct!=NULL ) + { + alglib_impl::_ssamodel_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::ssamodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::ssamodel), &_state); + memset(p_struct, 0, sizeof(alglib_impl::ssamodel)); + alglib_impl::_ssamodel_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_ssamodel_owner::_ssamodel_owner(const _ssamodel_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); + if( p_struct!=NULL ) + { + alglib_impl::_ssamodel_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: ssamodel copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::ssamodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::ssamodel), &_state); + memset(p_struct, 0, sizeof(alglib_impl::ssamodel)); + alglib_impl::_ssamodel_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } -/************************************************************************* -Copying of neural network - -INPUT PARAMETERS: - Network1 - original - -OUTPUT PARAMETERS: - Network2 - copy - - -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey -*************************************************************************/ -void mlpcopy(const multilayerperceptron &network1, multilayerperceptron &network2) +_ssamodel_owner& _ssamodel_owner::operator=(const _ssamodel_owner &rhs) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::mlpcopy(const_cast(network1.c_ptr()), const_cast(network2.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: ssamodel assignment constructor failure (destination is not initialized)", &_state); + 
alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: ssamodel assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_ssamodel_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::ssamodel)); + alglib_impl::_ssamodel_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + +_ssamodel_owner::~_ssamodel_owner() +{ + if( p_struct!=NULL ) { - throw ap_error(_alglib_env_state.error_msg); + alglib_impl::_ssamodel_destroy(p_struct); + ae_free(p_struct); } } +alglib_impl::ssamodel* _ssamodel_owner::c_ptr() +{ + return p_struct; +} + +alglib_impl::ssamodel* _ssamodel_owner::c_ptr() const +{ + return const_cast(p_struct); +} +ssamodel::ssamodel() : _ssamodel_owner() +{ +} + +ssamodel::ssamodel(const ssamodel &rhs):_ssamodel_owner(rhs) +{ +} + +ssamodel& ssamodel::operator=(const ssamodel &rhs) +{ + if( this==&rhs ) + return *this; + _ssamodel_owner::operator=(rhs); + return *this; +} + +ssamodel::~ssamodel() +{ +} + /************************************************************************* -This function copies tunable parameters (weights/means/sigmas) from one -network to another with same architecture. It performs some rudimentary -checks that architectures are same, and throws exception if check fails. +This function creates SSA model object. Right after creation model is in +"dummy" mode - you can add data, but analyzing/prediction will return +just zeros (it assumes that basis is empty). -It is intended for fast copying of states between two network which are -known to have same geometry. +HOW TO USE SSA MODEL: + +1. create model with ssacreate() +2. add data with one/many ssaaddsequence() calls +3. choose SSA algorithm with one of ssasetalgo...() functions: + * ssasetalgotopkdirect() for direct one-run analysis + * ssasetalgotopkrealtime() for algorithm optimized for many subsequent + runs with warm-start capabilities + * ssasetalgoprecomputed() for user-supplied basis +4. set window width with ssasetwindow() +5. perform one of the analysis-related activities: + a) call ssagetbasis() to get basis + b) call ssaanalyzelast() ssaanalyzesequence() or ssaanalyzelastwindow() + to perform analysis (trend/noise separation) + c) call one of the forecasting functions (ssaforecastlast() or + ssaforecastsequence()) to perform prediction; alternatively, you can + extract linear recurrence coefficients with ssagetlrr(). + SSA analysis will be performed during first call to analysis-related + function. SSA model is smart enough to track all changes in the dataset + and model settings, to cache previously computed basis and to + re-evaluate basis only when necessary. + +Additionally, if your setting involves constant stream of incoming data, +you can perform quick update already calculated model with one of the +incremental append-and-update functions: ssaappendpointandupdate() or +ssaappendsequenceandupdate(). + +NOTE: steps (2), (3), (4) can be performed in arbitrary order. 
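As a concrete companion to steps (1)-(5) above, here is a minimal usage sketch; it assumes the usual "dataanalysis.h" header, the defaulted trailing xparams argument of these wrappers, and an invented input series:

    #include <cstdio>
    #include "dataanalysis.h"      // assumed ALGLIB header

    using namespace alglib;

    int main()
    {
        // 1. create model
        ssamodel s;
        ssacreate(s);

        // 2. add one data sequence (a noisy ramp); length is taken from the array
        real_1d_array x = "[0.0,0.1,0.3,0.2,0.4,0.5,0.7,0.6,0.8,0.9,1.1,1.0,1.2,1.3,1.5,1.4]";
        ssaaddsequence(s, x);

        // 3+4. choose algorithm and window width (order of steps 2-4 is arbitrary)
        ssasetalgotopkdirect(s, 2);
        ssasetwindow(s, 4);

        // 5. analysis-related call; SSA itself runs lazily on this first request
        real_1d_array trend, noise;
        ae_int_t nticks;
        ssaanalyzelastwindow(s, trend, noise, nticks);
        printf("trend of last window: %s\n", trend.tostring(3).c_str());
        return 0;
    }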
INPUT PARAMETERS: - Network1 - source, must be correctly initialized - Network2 - target, must have same architecture + none OUTPUT PARAMETERS: - Network2 - network state is copied from source to target + S - structure which stores model state -- ALGLIB -- - Copyright 20.06.2013 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlpcopytunableparameters(const multilayerperceptron &network1, const multilayerperceptron &network2) +void ssacreate(ssamodel &s, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcopytunableparameters(const_cast(network1.c_ptr()), const_cast(network2.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssacreate(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Randomization of neural network weights +This function sets window width for SSA model. You should call it before +analysis phase. Default window width is 1 (not for real use). + +Special notes: +* this function call can be performed at any moment before first call to + analysis-related functions +* changing window width invalidates internally stored basis; if you change + window width AFTER you call analysis-related function, next analysis + phase will require re-calculation of the basis according to current + algorithm. +* calling this function with exactly same window width as current one has + no effect +* if you specify window width larger than any data sequence stored in the + model, analysis will return zero basis. 
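A small sketch of the invalidation rules listed above (same assumptions: header name, defaulted xparams, invented data):

    #include "dataanalysis.h"      // assumed ALGLIB header

    using namespace alglib;

    int main()
    {
        ssamodel s;
        ssacreate(s);
        real_1d_array x = "[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]";
        ssaaddsequence(s, x);
        ssasetalgotopkdirect(s, 2);

        real_1d_array trend, noise;
        ae_int_t nticks;

        ssasetwindow(s, 4);
        ssaanalyzelastwindow(s, trend, noise, nticks);   // basis computed here

        ssasetwindow(s, 4);                              // same width again: no effect, basis kept
        ssasetwindow(s, 8);                              // new width: basis invalidated...
        ssaanalyzelastwindow(s, trend, noise, nticks);   // ...and recomputed on this call
        return 0;
    }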
+ +INPUT PARAMETERS: + S - SSA model created with ssacreate() + WindowWidth - >=1, new window width + +OUTPUT PARAMETERS: + S - SSA model, updated -- ALGLIB -- - Copyright 06.11.2007 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlprandomize(const multilayerperceptron &network) +void ssasetwindow(const ssamodel &s, const ae_int_t windowwidth, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlprandomize(const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssasetwindow(const_cast(s.c_ptr()), windowwidth, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Randomization of neural network weights and standartisator +This function sets seed which is used to initialize internal RNG when +we make pseudorandom decisions on model updates. + +By default, deterministic seed is used - which results in same sequence of +pseudorandom decisions every time you run SSA model. If you specify non- +deterministic seed value, then SSA model may return slightly different +results after each run. + +This function can be useful when you have several SSA models updated with +sseappendpointandupdate() called with 0(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssasetseed(const_cast(s.c_ptr()), seed, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Internal subroutine. +This function sets length of power-up cycle for real-time algorithm. + +By default, this algorithm performs costly O(N*WindowWidth^2) init phase +followed by full run of truncated EVD. However, if you are ready to +live with a bit lower-quality basis during first few iterations, you can +split this O(N*WindowWidth^2) initialization between several subsequent +append-and-update rounds. It results in better latency of the algorithm. + +This function invalidates basis/solver, next analysis call will result in +full recalculation of everything. 
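A sketch of how the real-time algorithm, the power-up option described here, and incremental appends might be combined; the header name, the defaulted xparams argument and the synthetic signal are assumptions:

    #include <cmath>
    #include "dataanalysis.h"      // assumed ALGLIB header

    using namespace alglib;

    int main()
    {
        ssamodel s;
        ssacreate(s);
        ssasetwindow(s, 32);
        ssasetalgotopkrealtime(s, 3);
        ssasetpoweruplength(s, 10);          // spread the O(N*Window^2) init over 10 updates

        // seed the model with an initial sequence
        real_1d_array hist;
        hist.setlength(256);
        for(int i=0; i<256; i++)
            hist[i] = std::sin(0.1*i);
        ssaaddsequence(s, hist);

        // stream new ticks; UpdateIts=1.0 performs one basis update per appended point
        real_1d_array trend, noise;
        ae_int_t nticks;
        for(int t=256; t<356; t++)
        {
            ssaappendpointandupdate(s, std::sin(0.1*t), 1.0);
            ssaanalyzelastwindow(s, trend, noise, nticks);   // cheap: reuses the updated basis
        }
        return 0;
    }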
+ +INPUT PARAMETERS: + S - SSA model + PWLen - length of the power-up stage: + * 0 means that no power-up is requested + * 1 is the same as 0 + * >1 means that delayed power-up is performed -- ALGLIB -- - Copyright 30.03.2008 by Bochkanov Sergey + Copyright 03.11.2017 by Bochkanov Sergey *************************************************************************/ -void mlpinitpreprocessor(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize) +void ssasetpoweruplength(const ssamodel &s, const ae_int_t pwlen, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpinitpreprocessor(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssasetpoweruplength(const_cast(s.c_ptr()), pwlen, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Returns information about initialized network: number of inputs, outputs, -weights. +This function sets memory limit of SSA analysis. + +Straightforward SSA with sequence length T and window width W needs O(T*W) +memory. It is possible to reduce memory consumption by splitting task into +smaller chunks. + +Thus function allows you to specify approximate memory limit (measured in +double precision numbers used for buffers). Actual memory consumption will +be comparable to the number specified by you. + +Default memory limit is 50.000.000 (400Mbytes) in current version. + +INPUT PARAMETERS: + S - SSA model + MemLimit- memory limit, >=0. Zero value means no limit. -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 20.12.2017 by Bochkanov Sergey *************************************************************************/ -void mlpproperties(const multilayerperceptron &network, ae_int_t &nin, ae_int_t &nout, ae_int_t &wcount) +void ssasetmemorylimit(const ssamodel &s, const ae_int_t memlimit, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpproperties(const_cast(network.c_ptr()), &nin, &nout, &wcount, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssasetmemorylimit(const_cast(s.c_ptr()), memlimit, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Returns number of inputs. 
+This function adds data sequence to SSA model. Only single-dimensional +sequences are supported. - -- ALGLIB -- - Copyright 19.10.2011 by Bochkanov Sergey -*************************************************************************/ -ae_int_t mlpgetinputscount(const multilayerperceptron &network) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::mlpgetinputscount(const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} +What is a sequences? Following definitions/requirements apply: +* a sequence is an array of values measured in subsequent, equally + separated time moments (ticks). +* you may have many sequences in your dataset; say, one sequence may + correspond to one trading session. +* sequence length should be larger than current window length (shorter + sequences will be ignored during analysis). +* analysis is performed within a sequence; different sequences are NOT + stacked together to produce one large contiguous stream of data. +* analysis is performed for all sequences at once, i.e. same set of basis + vectors is computed for all sequences -/************************************************************************* -Returns number of outputs. +INCREMENTAL ANALYSIS + +This function is non intended for incremental updates of previously found +SSA basis. Calling it invalidates all previous analysis results (basis is +reset and will be recalculated from zero during next analysis). + +If you want to perform incremental/real-time SSA, consider using +following functions: +* ssaappendpointandupdate() for appending one point +* ssaappendsequenceandupdate() for appending new sequence + +INPUT PARAMETERS: + S - SSA model created with ssacreate() + X - array[N], data, can be larger (additional values + are ignored) + N - data length, can be automatically determined from + the array length. N>=0. 
+ +OUTPUT PARAMETERS: + S - SSA model, updated + +NOTE: you can clear dataset with ssacleardata() -- ALGLIB -- - Copyright 19.10.2011 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -ae_int_t mlpgetoutputscount(const multilayerperceptron &network) +void ssaaddsequence(const ssamodel &s, const real_1d_array &x, const ae_int_t n, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::ae_int_t result = alglib_impl::mlpgetoutputscount(const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaaddsequence(const_cast(s.c_ptr()), const_cast(x.c_ptr()), n, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Returns number of weights. +This function adds data sequence to SSA model. Only single-dimensional +sequences are supported. + +What is a sequences? Following definitions/requirements apply: +* a sequence is an array of values measured in subsequent, equally + separated time moments (ticks). +* you may have many sequences in your dataset; say, one sequence may + correspond to one trading session. +* sequence length should be larger than current window length (shorter + sequences will be ignored during analysis). +* analysis is performed within a sequence; different sequences are NOT + stacked together to produce one large contiguous stream of data. +* analysis is performed for all sequences at once, i.e. same set of basis + vectors is computed for all sequences + +INCREMENTAL ANALYSIS + +This function is non intended for incremental updates of previously found +SSA basis. Calling it invalidates all previous analysis results (basis is +reset and will be recalculated from zero during next analysis). + +If you want to perform incremental/real-time SSA, consider using +following functions: +* ssaappendpointandupdate() for appending one point +* ssaappendsequenceandupdate() for appending new sequence + +INPUT PARAMETERS: + S - SSA model created with ssacreate() + X - array[N], data, can be larger (additional values + are ignored) + N - data length, can be automatically determined from + the array length. N>=0. 
+ +OUTPUT PARAMETERS: + S - SSA model, updated + +NOTE: you can clear dataset with ssacleardata() -- ALGLIB -- - Copyright 19.10.2011 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -ae_int_t mlpgetweightscount(const multilayerperceptron &network) +#if !defined(AE_NO_EXCEPTIONS) +void ssaaddsequence(const ssamodel &s, const real_1d_array &x, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + ae_int_t n; + + n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::mlpgetweightscount(const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaaddsequence(const_cast(s.c_ptr()), const_cast(x.c_ptr()), n, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif /************************************************************************* -Tells whether network is SOFTMAX-normalized (i.e. classifier) or not. +This function appends single point to last data sequence stored in the SSA +model and tries to update model in the incremental manner (if possible +with current algorithm). + +If you want to add more than one point at once: +* if you want to add M points to the same sequence, perform M-1 calls with + UpdateIts parameter set to 0.0, and last call with non-zero UpdateIts. +* if you want to add new sequence, use ssaappendsequenceandupdate() + +Running time of this function does NOT depend on dataset size, only on +window width and number of singular vectors. Depending on algorithm being +used, incremental update has complexity: +* for top-K real time - O(UpdateIts*K*Width^2), with fractional UpdateIts +* for top-K direct - O(Width^3) for any non-zero UpdateIts +* for precomputed basis - O(1), no update is performed + +INPUT PARAMETERS: + S - SSA model created with ssacreate() + X - new point + UpdateIts - >=0, floating point (!) value, desired update + frequency: + * zero value means that point is stored, but no + update is performed + * integer part of the value means that specified + number of iterations is always performed + * fractional part of the value means that one + iteration is performed with this probability. 
+ + Recommended value: 0(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaappendpointandupdate(const_cast(s.c_ptr()), x, updateits, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function returns total number of layers (including input, hidden and -output layers). +This function appends new sequence to dataset stored in the SSA model and +tries to update model in the incremental manner (if possible with current +algorithm). + +Notes: +* if you want to add M sequences at once, perform M-1 calls with UpdateIts + parameter set to 0.0, and last call with non-zero UpdateIts. +* if you want to add just one point, use ssaappendpointandupdate() + +Running time of this function does NOT depend on dataset size, only on +sequence length, window width and number of singular vectors. Depending on +algorithm being used, incremental update has complexity: +* for top-K real time - O(UpdateIts*K*Width^2+(NTicks-Width)*Width^2) +* for top-K direct - O(Width^3+(NTicks-Width)*Width^2) +* for precomputed basis - O(1), no update is performed + +INPUT PARAMETERS: + S - SSA model created with ssacreate() + X - new sequence, array[NTicks] or larget + NTicks - >=1, number of ticks in the sequence + UpdateIts - >=0, floating point (!) value, desired update + frequency: + * zero value means that point is stored, but no + update is performed + * integer part of the value means that specified + number of iterations is always performed + * fractional part of the value means that one + iteration is performed with this probability. + + Recommended value: 0(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaappendsequenceandupdate(const_cast(s.c_ptr()), const_cast(x.c_ptr()), nticks, updateits, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function returns size of K-th layer. +This function appends new sequence to dataset stored in the SSA model and +tries to update model in the incremental manner (if possible with current +algorithm). -K=0 corresponds to input layer, K=CNT-1 corresponds to output layer. +Notes: +* if you want to add M sequences at once, perform M-1 calls with UpdateIts + parameter set to 0.0, and last call with non-zero UpdateIts. 
+* if you want to add just one point, use ssaappendpointandupdate() -Size of the output layer is always equal to the number of outputs, although -when we have softmax-normalized network, last neuron doesn't have any -connections - it is just zero. +Running time of this function does NOT depend on dataset size, only on +sequence length, window width and number of singular vectors. Depending on +algorithm being used, incremental update has complexity: +* for top-K real time - O(UpdateIts*K*Width^2+(NTicks-Width)*Width^2) +* for top-K direct - O(Width^3+(NTicks-Width)*Width^2) +* for precomputed basis - O(1), no update is performed + +INPUT PARAMETERS: + S - SSA model created with ssacreate() + X - new sequence, array[NTicks] or larget + NTicks - >=1, number of ticks in the sequence + UpdateIts - >=0, floating point (!) value, desired update + frequency: + * zero value means that point is stored, but no + update is performed + * integer part of the value means that specified + number of iterations is always performed + * fractional part of the value means that one + iteration is performed with this probability. + + Recommended value: 0(network.c_ptr()), k, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaappendsequenceandupdate(const_cast(s.c_ptr()), const_cast(x.c_ptr()), nticks, updateits, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif /************************************************************************* -This function returns offset/scaling coefficients for I-th input of the -network. +This function sets SSA algorithm to "precomputed vectors" algorithm. + +This algorithm uses precomputed set of orthonormal (orthogonal AND +normalized) basis vectors supplied by user. Thus, basis calculation phase +is not performed - we already have our basis - and only analysis/ +forecasting phase requires actual calculations. + +This algorithm may handle "append" requests which add just one/few ticks +to the end of the last sequence in O(1) time. + +NOTE: this algorithm accepts both basis and window width, because these + two parameters are naturally aligned. Calling this function sets + window width; if you call ssasetwindow() with other window width, + then during analysis stage algorithm will detect conflict and reset + to zero basis. INPUT PARAMETERS: - Network - network - I - input index + S - SSA model + A - array[WindowWidth,NBasis], orthonormalized basis; + this function does NOT control orthogonality and + does NOT perform any kind of renormalization. It + is your responsibility to provide it with correct + basis. + WindowWidth - window width, >=1 + NBasis - number of basis vectors, 1<=NBasis<=WindowWidth OUTPUT PARAMETERS: - Mean - mean term - Sigma - sigma term, guaranteed to be nonzero. + S - updated model -I-th input is passed through linear transformation - IN[i] = (IN[i]-Mean)/Sigma -before feeding to the network +NOTE: calling this function invalidates basis in all cases. 
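A minimal sketch of the "precomputed vectors" mode, assuming the usual header, the defaulted xparams argument and a deliberately trivial orthonormal basis:

    #include "dataanalysis.h"      // assumed ALGLIB header

    using namespace alglib;

    int main()
    {
        ssamodel s;
        ssacreate(s);

        // trivial orthonormal basis: first two coordinate vectors of a width-3 window
        real_2d_array basis = "[[1,0],[0,1],[0,0]]";   // array[WindowWidth=3, NBasis=2]
        ssasetalgoprecomputed(s, basis, 3, 2);         // also fixes the window width at 3

        real_1d_array x = "[1,2,3,4,5,6,7,8]";
        ssaaddsequence(s, x);

        real_1d_array trend, noise;
        ae_int_t nticks;
        ssaanalyzelastwindow(s, trend, noise, nticks); // no basis computation: it was user-supplied
        return 0;
    }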
-- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlpgetinputscaling(const multilayerperceptron &network, const ae_int_t i, double &mean, double &sigma) +void ssasetalgoprecomputed(const ssamodel &s, const real_2d_array &a, const ae_int_t windowwidth, const ae_int_t nbasis, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpgetinputscaling(const_cast(network.c_ptr()), i, &mean, &sigma, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssasetalgoprecomputed(const_cast(s.c_ptr()), const_cast(a.c_ptr()), windowwidth, nbasis, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function returns offset/scaling coefficients for I-th output of the -network. +This function sets SSA algorithm to "precomputed vectors" algorithm. + +This algorithm uses precomputed set of orthonormal (orthogonal AND +normalized) basis vectors supplied by user. Thus, basis calculation phase +is not performed - we already have our basis - and only analysis/ +forecasting phase requires actual calculations. + +This algorithm may handle "append" requests which add just one/few ticks +to the end of the last sequence in O(1) time. + +NOTE: this algorithm accepts both basis and window width, because these + two parameters are naturally aligned. Calling this function sets + window width; if you call ssasetwindow() with other window width, + then during analysis stage algorithm will detect conflict and reset + to zero basis. INPUT PARAMETERS: - Network - network - I - input index + S - SSA model + A - array[WindowWidth,NBasis], orthonormalized basis; + this function does NOT control orthogonality and + does NOT perform any kind of renormalization. It + is your responsibility to provide it with correct + basis. + WindowWidth - window width, >=1 + NBasis - number of basis vectors, 1<=NBasis<=WindowWidth OUTPUT PARAMETERS: - Mean - mean term - Sigma - sigma term, guaranteed to be nonzero. + S - updated model -I-th output is passed through linear transformation - OUT[i] = OUT[i]*Sigma+Mean -before returning it to user. In case we have SOFTMAX-normalized network, -we return (Mean,Sigma)=(0.0,1.0). +NOTE: calling this function invalidates basis in all cases. 
-- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlpgetoutputscaling(const multilayerperceptron &network, const ae_int_t i, double &mean, double &sigma) +#if !defined(AE_NO_EXCEPTIONS) +void ssasetalgoprecomputed(const ssamodel &s, const real_2d_array &a, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + ae_int_t windowwidth; + ae_int_t nbasis; + + windowwidth = a.rows(); + nbasis = a.cols(); alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mlpgetoutputscaling(const_cast(network.c_ptr()), i, &mean, &sigma, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssasetalgoprecomputed(const_cast(s.c_ptr()), const_cast(a.c_ptr()), windowwidth, nbasis, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif /************************************************************************* -This function returns information about Ith neuron of Kth layer +This function sets SSA algorithm to "direct top-K" algorithm. + +"Direct top-K" algorithm performs full SVD of the N*WINDOW trajectory +matrix (hence its name - direct solver is used), then extracts top K +components. Overall running time is O(N*WINDOW^2), where N is a number of +ticks in the dataset, WINDOW is window width. + +This algorithm may handle "append" requests which add just one/few ticks +to the end of the last sequence in O(WINDOW^3) time, which is ~N/WINDOW +times faster than re-computing everything from scratch. INPUT PARAMETERS: - Network - network - K - layer index - I - neuron index (within layer) + S - SSA model + TopK - number of components to analyze; TopK>=1. OUTPUT PARAMETERS: - FKind - activation function type (used by MLPActivationFunction()) - this value is zero for input or linear neurons - Threshold - also called offset, bias - zero for input neurons + S - updated model -NOTE: this function throws exception if layer or neuron with given index -do not exists. + +NOTE: TopK>WindowWidth is silently decreased to WindowWidth during analysis + phase + +NOTE: calling this function invalidates basis, except for the situation + when this algorithm was already set with same parameters. 
-- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlpgetneuroninfo(const multilayerperceptron &network, const ae_int_t k, const ae_int_t i, ae_int_t &fkind, double &threshold) +void ssasetalgotopkdirect(const ssamodel &s, const ae_int_t topk, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpgetneuroninfo(const_cast(network.c_ptr()), k, i, &fkind, &threshold, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssasetalgotopkdirect(const_cast(s.c_ptr()), topk, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function returns information about connection from I0-th neuron of -K0-th layer to I1-th neuron of K1-th layer. +This function sets SSA algorithm to "top-K real time algorithm". This algo +extracts K components with largest singular values. + +It is real-time version of top-K algorithm which is optimized for +incremental processing and fast start-up. Internally it uses subspace +eigensolver for truncated SVD. It results in ability to perform quick +updates of the basis when only a few points/sequences is added to dataset. + +Performance profile of the algorithm is given below: +* O(K*WindowWidth^2) running time for incremental update of the dataset + with one of the "append-and-update" functions (ssaappendpointandupdate() + or ssaappendsequenceandupdate()). +* O(N*WindowWidth^2) running time for initial basis evaluation (N=size of + dataset) +* ability to split costly initialization across several incremental + updates of the basis (so called "Power-Up" functionality, activated by + ssasetpoweruplength() function) INPUT PARAMETERS: - Network - network - K0 - layer index - I0 - neuron index (within layer) - K1 - layer index - I1 - neuron index (within layer) + S - SSA model + TopK - number of components to analyze; TopK>=1. -RESULT: - connection weight (zero for non-existent connections) +OUTPUT PARAMETERS: + S - updated model -This function: -1. throws exception if layer or neuron with given index do not exists. -2. returns zero if neurons exist, but there is no connection between them +NOTE: this algorithm is optimized for large-scale tasks with large + datasets. On toy problems with just 5-10 points it can return basis + which is slightly different from that returned by direct algorithm + (ssasetalgotopkdirect() function). However, the difference becomes + negligible as dataset grows. + +NOTE: TopK>WindowWidth is silently decreased to WindowWidth during analysis + phase + +NOTE: calling this function invalidates basis, except for the situation + when this algorithm was already set with same parameters. 
-- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -double mlpgetweight(const multilayerperceptron &network, const ae_int_t k0, const ae_int_t i0, const ae_int_t k1, const ae_int_t i1) +void ssasetalgotopkrealtime(const ssamodel &s, const ae_int_t topk, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlpgetweight(const_cast(network.c_ptr()), k0, i0, k1, i1, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssasetalgotopkrealtime(const_cast(s.c_ptr()), topk, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function sets offset/scaling coefficients for I-th input of the -network. +This function clears all data stored in the model and invalidates all +basis components found so far. INPUT PARAMETERS: - Network - network - I - input index - Mean - mean term - Sigma - sigma term (if zero, will be replaced by 1.0) + S - SSA model created with ssacreate() -NTE: I-th input is passed through linear transformation - IN[i] = (IN[i]-Mean)/Sigma -before feeding to the network. This function sets Mean and Sigma. +OUTPUT PARAMETERS: + S - SSA model, updated -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlpsetinputscaling(const multilayerperceptron &network, const ae_int_t i, const double mean, const double sigma) +void ssacleardata(const ssamodel &s, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpsetinputscaling(const_cast(network.c_ptr()), i, mean, sigma, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssacleardata(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function sets offset/scaling coefficients for I-th output of the -network. +This function executes SSA on internally stored dataset and returns basis +found by current method. 
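The ssacleardata() wrapper above lets one model object be reused across datasets; a possible usage sketch, under the same assumptions as the other sketches (header name, defaulted xparams, invented data):

    #include "dataanalysis.h"      // assumed ALGLIB header

    using namespace alglib;

    int main()
    {
        ssamodel s;
        ssacreate(s);
        ssasetwindow(s, 4);
        ssasetalgotopkdirect(s, 2);

        real_1d_array trend, noise;
        ae_int_t nticks;

        // first dataset
        real_1d_array x1 = "[1,2,3,4,5,6,7,8,9,10,11,12]";
        ssaaddsequence(s, x1);
        ssaanalyzelastwindow(s, trend, noise, nticks);

        // drop all data (and the cached basis), then reuse the same model object
        ssacleardata(s);
        real_1d_array x2 = "[5,4,3,2,1,0,-1,-2,-3,-4,-5,-6]";
        ssaaddsequence(s, x2);
        ssaanalyzelastwindow(s, trend, noise, nticks);   // basis recomputed for the new data
        return 0;
    }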
INPUT PARAMETERS: - Network - network - I - input index - Mean - mean term - Sigma - sigma term (if zero, will be replaced by 1.0) + S - SSA model OUTPUT PARAMETERS: + A - array[WindowWidth,NBasis], basis; vectors are + stored in matrix columns, by descreasing variance + SV - array[NBasis]: + * zeros - for model initialized with SSASetAlgoPrecomputed() + * singular values - for other algorithms + WindowWidth - current window + NBasis - basis size -NOTE: I-th output is passed through linear transformation - OUT[i] = OUT[i]*Sigma+Mean -before returning it to user. This function sets Sigma/Mean. In case we -have SOFTMAX-normalized network, you can not set (Sigma,Mean) to anything -other than(0.0,1.0) - this function will throw exception. + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + + +HANDLING OF DEGENERATE CASES + +Calling this function in degenerate cases (no data or all data are +shorter than window size; no algorithm is specified) returns basis with +just one zero vector. -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlpsetoutputscaling(const multilayerperceptron &network, const ae_int_t i, const double mean, const double sigma) +void ssagetbasis(const ssamodel &s, real_2d_array &a, real_1d_array &sv, ae_int_t &windowwidth, ae_int_t &nbasis, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpsetoutputscaling(const_cast(network.c_ptr()), i, mean, sigma, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssagetbasis(const_cast(s.c_ptr()), const_cast(a.c_ptr()), const_cast(sv.c_ptr()), &windowwidth, &nbasis, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function modifies information about Ith neuron of Kth layer +This function returns linear recurrence relation (LRR) coefficients found +by current SSA algorithm. 
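A possible way to pull the basis and singular values out of the ssagetbasis() wrapper above, assuming the usual header and the defaulted xparams argument:

    #include <cstdio>
    #include "dataanalysis.h"      // assumed ALGLIB header

    using namespace alglib;

    int main()
    {
        ssamodel s;
        ssacreate(s);
        real_1d_array x = "[0,1,0,-1,0,1,0,-1,0,1,0,-1,0,1,0,-1]";
        ssaaddsequence(s, x);
        ssasetwindow(s, 4);
        ssasetalgotopkdirect(s, 2);

        // first call runs SSA and caches the basis; later calls reuse the cache
        real_2d_array a;
        real_1d_array sv;
        ae_int_t windowwidth, nbasis;
        ssagetbasis(s, a, sv, windowwidth, nbasis);
        printf("window=%d, nbasis=%d\n", (int)windowwidth, (int)nbasis);
        printf("singular values: %s\n", sv.tostring(3).c_str());
        return 0;
    }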
INPUT PARAMETERS: - Network - network - K - layer index - I - neuron index (within layer) - FKind - activation function type (used by MLPActivationFunction()) - this value must be zero for input neurons - (you can not set activation function for input neurons) - Threshold - also called offset, bias - this value must be zero for input neurons - (you can not set threshold for input neurons) + S - SSA model + +OUTPUT PARAMETERS: + A - array[WindowWidth-1]. Coefficients of the + linear recurrence of the form: + X[W-1] = X[W-2]*A[W-2] + X[W-3]*A[W-3] + ... + X[0]*A[0]. + Empty array for WindowWidth=1. + WindowWidth - current window width -NOTES: -1. this function throws exception if layer or neuron with given index do - not exists. -2. this function also throws exception when you try to set non-linear - activation function for input neurons (any kind of network) or for output - neurons of classifier network. -3. this function throws exception when you try to set non-zero threshold for - input neurons (any kind of network). + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + + +HANDLING OF DEGENERATE CASES + +Calling this function in degenerate cases (no data or all data are +shorter than window size; no algorithm is specified) returns zeros. -- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlpsetneuroninfo(const multilayerperceptron &network, const ae_int_t k, const ae_int_t i, const ae_int_t fkind, const double threshold) +void ssagetlrr(const ssamodel &s, real_1d_array &a, ae_int_t &windowwidth, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpsetneuroninfo(const_cast(network.c_ptr()), k, i, fkind, threshold, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssagetlrr(const_cast(s.c_ptr()), const_cast(a.c_ptr()), &windowwidth, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -This function modifies information about connection from I0-th neuron of -K0-th layer to I1-th neuron of K1-th layer. +This function executes SSA on internally stored dataset and returns +analysis for the last window of the last sequence. Such analysis is +an lightweight alternative for full scale reconstruction (see below). 
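The recurrence returned by the ssagetlrr() wrapper above can be applied by hand for a one-step-ahead forecast; a sketch under the usual assumptions (header name, defaulted xparams, invented series):

    #include <cstdio>
    #include "dataanalysis.h"      // assumed ALGLIB header

    using namespace alglib;

    int main()
    {
        ssamodel s;
        ssacreate(s);
        real_1d_array x = "[0,1,0,-1,0,1,0,-1,0,1,0,-1,0,1,0,-1]";
        ssaaddsequence(s, x);
        ssasetwindow(s, 4);
        ssasetalgotopkdirect(s, 2);

        // LRR coefficients: X[W-1] = A[W-2]*X[W-2] + ... + A[0]*X[0]
        real_1d_array a;
        ae_int_t w;
        ssagetlrr(s, a, w);

        // hand-rolled one-step-ahead forecast from the last W-1 ticks of the series
        double next = 0.0;
        ae_int_t n = x.length();
        for(ae_int_t j=0; j<w-1; j++)
            next += a[j]*x[n-(w-1)+j];
        printf("one-step forecast: %.3f\n", next);
        return 0;
    }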
+ +Typical use case for this function is real-time setting, when you are +interested in quick-and-dirty (very quick and very dirty) processing of +just a few last ticks of the trend. + +IMPORTANT: full scale SSA involves analysis of the ENTIRE dataset, + with reconstruction being done for all positions of sliding + window with subsequent hankelization (diagonal averaging) of + the resulting matrix. + + Such analysis requires O((DataLen-Window)*Window*NBasis) FLOPs + and can be quite costly. However, it has nice noise-canceling + effects due to averaging. + + This function performs REDUCED analysis of the last window. It + is much faster - just O(Window*NBasis), but its results are + DIFFERENT from that of ssaanalyzelast(). In particular, first + few points of the trend are much more prone to noise. INPUT PARAMETERS: - Network - network - K0 - layer index - I0 - neuron index (within layer) - K1 - layer index - I1 - neuron index (within layer) - W - connection weight (must be zero for non-existent - connections) + S - SSA model + +OUTPUT PARAMETERS: + Trend - array[WindowSize], reconstructed trend line + Noise - array[WindowSize], the rest of the signal; + it holds that ActualData = Trend+Noise. + NTicks - current WindowSize + + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + +In any case, only basis is reused. Reconstruction is performed from +scratch every time you call this function. -This function: -1. throws exception if layer or neuron with given index do not exists. -2. throws exception if you try to set non-zero weight for non-existent - connection + +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* last sequence is shorter than the window length (analysis can be done, + but we can not perform reconstruction on the last sequence) + +Calling this function in degenerate cases returns following result: +* in any case, WindowWidth ticks is returned +* trend is assumed to be zero +* noise is initialized by the last sequence; if last sequence is shorter + than the window size, it is moved to the end of the array, and the + beginning of the noise array is filled by zeros + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is constructed). 
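A sketch of the quick last-window path described above, assuming a configured model s:

#include "dataanalysis.h"

// Reduced, single-window analysis of the most recent data; this is the
// fast O(Window*NBasis) path, not the full reconstruction.
void peek_last_window(const alglib::ssamodel &s)
{
    alglib::real_1d_array trend, noise;
    alglib::ae_int_t nticks;                           // set to the window width
    alglib::ssaanalyzelastwindow(s, trend, noise, nticks);
    // trend[nticks-1] + noise[nticks-1] reproduces the last stored tick
}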
-- ALGLIB -- - Copyright 25.03.2011 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlpsetweight(const multilayerperceptron &network, const ae_int_t k0, const ae_int_t i0, const ae_int_t k1, const ae_int_t i1, const double w) +void ssaanalyzelastwindow(const ssamodel &s, real_1d_array &trend, real_1d_array &noise, ae_int_t &nticks, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpsetweight(const_cast(network.c_ptr()), k0, i0, k1, i1, w, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaanalyzelastwindow(const_cast(s.c_ptr()), const_cast(trend.c_ptr()), const_cast(noise.c_ptr()), &nticks, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Neural network activation function +This function: +* builds SSA basis using internally stored (entire) dataset +* returns reconstruction for the last NTicks of the last sequence + +If you want to analyze some other sequence, use ssaanalyzesequence(). + +Reconstruction phase involves generation of NTicks-WindowWidth sliding +windows, their decomposition using empirical orthogonal functions found by +SSA, followed by averaging of each data point across several overlapping +windows. Thus, every point in the output trend is reconstructed using up +to WindowWidth overlapping windows (WindowWidth windows exactly in the +inner points, just one window at the extremal points). + +IMPORTANT: due to averaging this function returns different results for + different values of NTicks. It is expected and not a bug. + + For example: + * Trend[NTicks-1] is always same because it is not averaged in + any case (same applies to Trend[0]). + * Trend[NTicks-2] has different values for NTicks=WindowWidth + and NTicks=WindowWidth+1 because former case means that no + averaging is performed, and latter case means that averaging + using two sliding windows is performed. Larger values of + NTicks produce same results as NTicks=WindowWidth+1. + * ...and so on... + +PERFORMANCE: this function has O((NTicks-WindowWidth)*WindowWidth*NBasis) + running time. If you work in time-constrained setting and + have to analyze just a few last ticks, choosing NTicks equal + to WindowWidth+SmoothingLen, with SmoothingLen=1...WindowWidth + will result in good compromise between noise cancellation and + analysis speed. INPUT PARAMETERS: - NET - neuron input - K - function index (zero for linear function) + S - SSA model + NTicks - number of ticks to analyze, Nticks>=1. + * special case of NTicks<=WindowWidth is handled + by analyzing last window and returning NTicks + last ticks. + * special case NTicks>LastSequenceLen is handled + by prepending result with NTicks-LastSequenceLen + zeros. 
OUTPUT PARAMETERS: - F - function - DF - its derivative - D2F - its second derivative + Trend - array[NTicks], reconstructed trend line + Noise - array[NTicks], the rest of the signal; + it holds that ActualData = Trend+Noise. + + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + +In any case, only basis is reused. Reconstruction is performed from +scratch every time you call this function. + + +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* last sequence is shorter than the window length (analysis can be done, + but we can not perform reconstruction on the last sequence) + +Calling this function in degenerate cases returns following result: +* in any case, NTicks ticks is returned +* trend is assumed to be zero +* noise is initialized by the last sequence; if last sequence is shorter + than the window size, it is moved to the end of the array, and the + beginning of the noise array is filled by zeros + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is constructed). -- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlpactivationfunction(const double net, const ae_int_t k, double &f, double &df, double &d2f) +void ssaanalyzelast(const ssamodel &s, const ae_int_t nticks, real_1d_array &trend, real_1d_array &noise, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpactivationfunction(net, k, &f, &df, &d2f, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaanalyzelast(const_cast(s.c_ptr()), nticks, const_cast(trend.c_ptr()), const_cast(noise.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Procesing +This function: +* builds SSA basis using internally stored (entire) dataset +* returns reconstruction for the sequence being passed to this function + +If you want to analyze last sequence stored in the model, use +ssaanalyzelast(). 
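A sketch of the full-scale analysis of the dataset tail, following the WindowWidth+SmoothingLen recommendation from the performance note above; the smoothing length is an illustrative choice and the model s is assumed to be configured already:

#include "dataanalysis.h"

// Full analysis (with averaging over overlapping windows) of the last
// ticks of the internally stored dataset.
void analyze_tail(const alglib::ssamodel &s, alglib::ae_int_t windowwidth)
{
    alglib::ae_int_t smoothinglen = windowwidth;       // any value in 1..WindowWidth
    alglib::real_1d_array trend, noise;
    alglib::ssaanalyzelast(s, windowwidth + smoothinglen, trend, noise);
    // trend[i] + noise[i] equals the corresponding stored tick
}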
+ +Reconstruction phase involves generation of NTicks-WindowWidth sliding +windows, their decomposition using empirical orthogonal functions found by +SSA, followed by averaging of each data point across several overlapping +windows. Thus, every point in the output trend is reconstructed using up +to WindowWidth overlapping windows (WindowWidth windows exactly in the +inner points, just one window at the extremal points). + +PERFORMANCE: this function has O((NTicks-WindowWidth)*WindowWidth*NBasis) + running time. If you work in time-constrained setting and + have to analyze just a few last ticks, choosing NTicks equal + to WindowWidth+SmoothingLen, with SmoothingLen=1...WindowWidth + will result in good compromise between noise cancellation and + analysis speed. INPUT PARAMETERS: - Network - neural network - X - input vector, array[0..NIn-1]. + S - SSA model + Data - array[NTicks], can be larger (only NTicks leading + elements will be used) + NTicks - number of ticks to analyze, Nticks>=1. + * special case of NTicks(network.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} +CACHING/REUSE OF THE BASIS -/************************************************************************* -'interactive' variant of MLPProcess for languages like Python which -support constructs like "Y = MLPProcess(NN,X)" and interactive mode of the -interpreter +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. +In any case, only basis is reused. Reconstruction is performed from +scratch every time you call this function. + + +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* sequence being passed is shorter than the window length + +Calling this function in degenerate cases returns following result: +* in any case, NTicks ticks is returned +* trend is assumed to be zero +* noise is initialized by the sequence. + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is constructed). 
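A sketch of analyzing a caller-supplied sequence against the basis built from the internally stored dataset (the basis is cached across such calls); the data values are illustrative:

#include "dataanalysis.h"

void analyze_external(const alglib::ssamodel &s)
{
    alglib::real_1d_array data = "[2.0,2.9,4.1,5.2,5.8,7.1,8.0,9.2,9.8,11.1]";
    alglib::real_1d_array trend, noise;
    alglib::ssaanalyzesequence(s, data, data.length(), trend, noise);
    // data[i] == trend[i] + noise[i] for every analyzed tick
}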
-- ALGLIB -- - Copyright 21.09.2010 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -void mlpprocessi(const multilayerperceptron &network, const real_1d_array &x, real_1d_array &y) +void ssaanalyzesequence(const ssamodel &s, const real_1d_array &data, const ae_int_t nticks, real_1d_array &trend, real_1d_array &noise, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpprocessi(const_cast(network.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaanalyzesequence(const_cast(s.c_ptr()), const_cast(data.c_ptr()), nticks, const_cast(trend.c_ptr()), const_cast(noise.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Error of the neural network on dataset. - +This function: +* builds SSA basis using internally stored (entire) dataset +* returns reconstruction for the sequence being passed to this function -FOR USERS OF COMMERCIAL EDITION: +If you want to analyze last sequence stored in the model, use +ssaanalyzelast(). - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x, depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +Reconstruction phase involves generation of NTicks-WindowWidth sliding +windows, their decomposition using empirical orthogonal functions found by +SSA, followed by averaging of each data point across several overlapping +windows. Thus, every point in the output trend is reconstructed using up +to WindowWidth overlapping windows (WindowWidth windows exactly in the +inner points, just one window at the extremal points). +PERFORMANCE: this function has O((NTicks-WindowWidth)*WindowWidth*NBasis) + running time. 
If you work in time-constrained setting and + have to analyze just a few last ticks, choosing NTicks equal + to WindowWidth+SmoothingLen, with SmoothingLen=1...WindowWidth + will result in good compromise between noise cancellation and + analysis speed. INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. + S - SSA model + Data - array[NTicks], can be larger (only NTicks leading + elements will be used) + NTicks - number of ticks to analyze, Nticks>=1. + * special case of NTicks(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + ae_int_t nticks; -double smp_mlperror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) -{ - alglib_impl::ae_state _alglib_env_state; + nticks = data.length(); alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::_pexec_mlperror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaanalyzesequence(const_cast(s.c_ptr()), const_cast(data.c_ptr()), nticks, const_cast(trend.c_ptr()), const_cast(noise.c_ptr()), &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif /************************************************************************* -Error of the neural network on dataset given by sparse matrix. +This function builds SSA basis and performs forecasting for a specified +number of ticks, returning value of trend. +Forecast is performed as follows: +* SSA trend extraction is applied to last WindowWidth elements of the + internally stored dataset; this step is basically a noise reduction. +* linear recurrence relation is applied to extracted trend -FOR USERS OF COMMERCIAL EDITION: +This function has following running time: +* O(NBasis*WindowWidth) for trend extraction phase (always performed) +* O(WindowWidth*NTicks) for forecast phase - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x, depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! 
We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +NOTE: noise reduction is ALWAYS applied by this algorithm; if you want to + apply recurrence relation to raw unprocessed data, use another + function - ssaforecastsequence() which allows to turn on and off + noise reduction phase. +NOTE: this algorithm performs prediction using only one - last - sliding + window. Predictions produced by such approach are smooth + continuations of the reconstructed trend line, but they can be + easily corrupted by noise. If you need noise-resistant prediction, + use ssaforecastavglast() function, which averages predictions built + using several sliding windows. INPUT PARAMETERS: - Network - neural network - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. - NPoints - points count, >=0 - -RESULT: - sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) + S - SSA model + NTicks - number of ticks to forecast, NTicks>=1 -DATASET FORMAT: +OUTPUT PARAMETERS: + Trend - array[NTicks], predicted trend line -This function uses two different dataset formats - one for regression -networks, another one for classification networks. -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +CACHING/REUSE OF THE BASIS -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. 
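The caching rules just listed are what make a streaming pattern cheap. A sketch, with the update argument of ssaappendpointandupdate() left at an illustrative 0.0 (see that function's documentation for its exact meaning):

#include "dataanalysis.h"

// Appending ticks does not invalidate the cached basis, so later
// analyze/forecast calls keep reusing it instead of re-running full SSA.
void append_ticks(const alglib::ssamodel &s, const double *ticks, int n)
{
    for(int i = 0; i < n; i++)
        alglib::ssaappendpointandupdate(s, ticks[i], 0.0);
}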
- -- ALGLIB -- - Copyright 23.07.2012 by Bochkanov Sergey -*************************************************************************/ -double mlperrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlperrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} +HANDLING OF DEGENERATE CASES -double smp_mlperrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::_pexec_mlperrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* last sequence is shorter than the WindowWidth (analysis can be done, + but we can not perform forecasting on the last sequence) +* window lentgh is 1 (impossible to use for forecasting) +* SSA analysis algorithm is configured to extract basis whose size is + equal to window length (impossible to use for forecasting; only basis + whose size is less than window length can be used). -/************************************************************************* -Natural error function for neural network, internal subroutine. +Calling this function in degenerate cases returns following result: +* NTicks copies of the last value is returned for non-empty task with + large enough dataset, but with overcomplete basis (window width=1 or + basis size is equal to window width) +* zero trend with length=NTicks is returned for empty task -NOTE: this function is single-threaded. Unlike other error function, it -receives no speed-up from being executed in SMP mode. +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is ever constructed). 
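A sketch of the single-window forecast itself; the 5-tick horizon is illustrative and s is assumed to be a configured, non-degenerate model:

#include <cstdio>
#include "dataanalysis.h"

void print_forecast(const alglib::ssamodel &s)
{
    alglib::real_1d_array trend;
    alglib::ssaforecastlast(s, 5, trend);      // smooth but noise-sensitive, as noted above
    for(alglib::ae_int_t i = 0; i < trend.length(); i++)
        printf("t+%d: %.4f\n", (int)(i + 1), trend[i]);
}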
-- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -double mlperrorn(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize) +void ssaforecastlast(const ssamodel &s, const ae_int_t nticks, real_1d_array &trend, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlperrorn(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaforecastlast(const_cast(s.c_ptr()), nticks, const_cast(trend.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Classification error of the neural network on dataset. +This function builds SSA basis and performs forecasting for a user- +specified sequence, returning value of trend. +Forecasting is done in two stages: +* first, we extract trend from the WindowWidth last elements of the + sequence. This stage is optional, you can turn it off if you pass + data which are already processed with SSA. Of course, you can turn it + off even for raw data, but it is not recommended - noise suppression is + very important for correct prediction. +* then, we apply LRR for last WindowWidth-1 elements of the extracted + trend. -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +This function has following running time: +* O(NBasis*WindowWidth) for trend extraction phase +* O(WindowWidth*NTicks) for forecast phase +NOTE: this algorithm performs prediction using only one - last - sliding + window. Predictions produced by such approach are smooth + continuations of the reconstructed trend line, but they can be + easily corrupted by noise. 
If you need noise-resistant prediction, + use ssaforecastavgsequence() function, which averages predictions + built using several sliding windows. INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. + S - SSA model + Data - array[NTicks], data to forecast + DataLen - number of ticks in the data, DataLen>=1 + ForecastLen - number of ticks to predict, ForecastLen>=1 + ApplySmoothing - whether to apply smoothing trend extraction or not; + if you do not know what to specify, pass True. -RESULT: - classification error (number of misclassified cases) +OUTPUT PARAMETERS: + Trend - array[ForecastLen], forecasted trend -DATASET FORMAT: -This function uses two different dataset formats - one for regression -networks, another one for classification networks. +CACHING/REUSE OF THE BASIS -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). + +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* data sequence is shorter than the WindowWidth (analysis can be done, + but we can not perform forecasting on the last sequence) +* window lentgh is 1 (impossible to use for forecasting) +* SSA analysis algorithm is configured to extract basis whose size is + equal to window length (impossible to use for forecasting; only basis + whose size is less than window length can be used). + +Calling this function in degenerate cases returns following result: +* ForecastLen copies of the last value is returned for non-empty task with + large enough dataset, but with overcomplete basis (window width=1 or + basis size is equal to window width) +* zero trend with length=ForecastLen is returned for empty task + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is ever constructed). 
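A sketch of forecasting the continuation of a caller-supplied sequence; ApplySmoothing is kept at true, as recommended above for raw input, and the horizon is illustrative:

#include "dataanalysis.h"

void forecast_external(const alglib::ssamodel &s,
                       const alglib::real_1d_array &history,
                       alglib::real_1d_array &prediction)
{
    const alglib::ae_int_t forecastlen = 10;       // illustrative horizon
    alglib::ssaforecastsequence(s, history, history.length(),
                                forecastlen, true, prediction);
}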
-- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -ae_int_t mlpclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) +void ssaforecastsequence(const ssamodel &s, const real_1d_array &data, const ae_int_t datalen, const ae_int_t forecastlen, const bool applysmoothing, real_1d_array &trend, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::ae_int_t result = alglib_impl::mlpclserror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } -} - + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaforecastsequence(const_cast(s.c_ptr()), const_cast(data.c_ptr()), datalen, forecastlen, applysmoothing, const_cast(trend.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function builds SSA basis and performs forecasting for a user- +specified sequence, returning value of trend. + +Forecasting is done in two stages: +* first, we extract trend from the WindowWidth last elements of the + sequence. This stage is optional, you can turn it off if you pass + data which are already processed with SSA. Of course, you can turn it + off even for raw data, but it is not recommended - noise suppression is + very important for correct prediction. +* then, we apply LRR for last WindowWidth-1 elements of the extracted + trend. + +This function has following running time: +* O(NBasis*WindowWidth) for trend extraction phase +* O(WindowWidth*NTicks) for forecast phase + +NOTE: this algorithm performs prediction using only one - last - sliding + window. Predictions produced by such approach are smooth + continuations of the reconstructed trend line, but they can be + easily corrupted by noise. If you need noise-resistant prediction, + use ssaforecastavgsequence() function, which averages predictions + built using several sliding windows. + +INPUT PARAMETERS: + S - SSA model + Data - array[NTicks], data to forecast + DataLen - number of ticks in the data, DataLen>=1 + ForecastLen - number of ticks to predict, ForecastLen>=1 + ApplySmoothing - whether to apply smoothing trend extraction or not; + if you do not know what to specify, pass True. + +OUTPUT PARAMETERS: + Trend - array[ForecastLen], forecasted trend + + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. 
+* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + + +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* data sequence is shorter than the WindowWidth (analysis can be done, + but we can not perform forecasting on the last sequence) +* window lentgh is 1 (impossible to use for forecasting) +* SSA analysis algorithm is configured to extract basis whose size is + equal to window length (impossible to use for forecasting; only basis + whose size is less than window length can be used). + +Calling this function in degenerate cases returns following result: +* ForecastLen copies of the last value is returned for non-empty task with + large enough dataset, but with overcomplete basis (window width=1 or + basis size is equal to window width) +* zero trend with length=ForecastLen is returned for empty task + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is ever constructed). -ae_int_t smp_mlpclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey +*************************************************************************/ +#if !defined(AE_NO_EXCEPTIONS) +void ssaforecastsequence(const ssamodel &s, const real_1d_array &data, const ae_int_t forecastlen, real_1d_array &trend, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + ae_int_t datalen; + bool applysmoothing; + + datalen = data.length(); + applysmoothing = true; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::_pexec_mlpclserror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaforecastsequence(const_cast(s.c_ptr()), const_cast(data.c_ptr()), datalen, forecastlen, applysmoothing, const_cast(trend.c_ptr()), &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif /************************************************************************* -Relative classification error on the test set. +This function builds SSA basis and performs forecasting for a specified +number of ticks, returning value of trend. +Forecast is performed as follows: +* SSA trend extraction is applied to last M sliding windows of the + internally stored dataset +* for each of M sliding windows, M predictions are built +* average value of M predictions is returned -FOR USERS OF COMMERCIAL EDITION: +This function has following running time: +* O(NBasis*WindowWidth*M) for trend extraction phase (always performed) +* O(WindowWidth*NTicks*M) for forecast phase - ! 
Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +NOTE: noise reduction is ALWAYS applied by this algorithm; if you want to + apply recurrence relation to raw unprocessed data, use another + function - ssaforecastsequence() which allows to turn on and off + noise reduction phase. +NOTE: combination of several predictions results in lesser sensitivity to + noise, but it may produce undesirable discontinuities between last + point of the trend and first point of the prediction. The reason is + that last point of the trend is usually corrupted by noise, but + average value of several predictions is less sensitive to noise, + thus discontinuity appears. It is not a bug. INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. + S - SSA model + M - number of sliding windows to combine, M>=1. If + your dataset has less than M sliding windows, this + parameter will be silently reduced. + NTicks - number of ticks to forecast, NTicks>=1 -RESULT: -Percent of incorrectly classified cases. Works both for classifier -networks and general purpose networks used as classifiers. +OUTPUT PARAMETERS: + Trend - array[NTicks], predicted trend line -DATASET FORMAT: -This function uses two different dataset formats - one for regression -networks, another one for classification networks. +CACHING/REUSE OF THE BASIS -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). 
+ +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* last sequence is shorter than the WindowWidth (analysis can be done, + but we can not perform forecasting on the last sequence) +* window lentgh is 1 (impossible to use for forecasting) +* SSA analysis algorithm is configured to extract basis whose size is + equal to window length (impossible to use for forecasting; only basis + whose size is less than window length can be used). + +Calling this function in degenerate cases returns following result: +* NTicks copies of the last value is returned for non-empty task with + large enough dataset, but with overcomplete basis (window width=1 or + basis size is equal to window width) +* zero trend with length=NTicks is returned for empty task + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is ever constructed). -- ALGLIB -- - Copyright 25.12.2008 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -double mlprelclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) +void ssaforecastavglast(const ssamodel &s, const ae_int_t m, const ae_int_t nticks, real_1d_array &trend, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlprelclserror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaforecastavglast(const_cast(s.c_ptr()), m, nticks, const_cast(trend.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +This function builds SSA basis and performs forecasting for a user- +specified sequence, returning value of trend. -double smp_mlprelclserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::_pexec_mlprelclserror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} +Forecasting is done in two stages: +* first, we extract trend from M last sliding windows of the sequence. + This stage is optional, you can turn it off if you pass data which + are already processed with SSA. Of course, you can turn it off even + for raw data, but it is not recommended - noise suppression is very + important for correct prediction. 
+* then, we apply LRR independently for M sliding windows +* average of M predictions is returned -/************************************************************************* -Relative classification error on the test set given by sparse matrix. +This function has following running time: +* O(NBasis*WindowWidth*M) for trend extraction phase +* O(WindowWidth*NTicks*M) for forecast phase +NOTE: combination of several predictions results in lesser sensitivity to + noise, but it may produce undesirable discontinuities between last + point of the trend and first point of the prediction. The reason is + that last point of the trend is usually corrupted by noise, but + average value of several predictions is less sensitive to noise, + thus discontinuity appears. It is not a bug. -FOR USERS OF COMMERCIAL EDITION: +INPUT PARAMETERS: + S - SSA model + Data - array[NTicks], data to forecast + DataLen - number of ticks in the data, DataLen>=1 + M - number of sliding windows to combine, M>=1. If + your dataset has less than M sliding windows, this + parameter will be silently reduced. + ForecastLen - number of ticks to predict, ForecastLen>=1 + ApplySmoothing - whether to apply smoothing trend extraction or not. + if you do not know what to specify, pass true. - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +OUTPUT PARAMETERS: + Trend - array[ForecastLen], forecasted trend -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format. Sparse matrix must use CRS format - for storage. - NPoints - points count, >=0. +CACHING/REUSE OF THE BASIS -RESULT: -Percent of incorrectly classified cases. Works both for classifier -networks and general purpose networks used as classifiers. +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. -DATASET FORMAT: -This function uses two different dataset formats - one for regression -networks, another one for classification networks. 
+HANDLING OF DEGENERATE CASES -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* data sequence is shorter than the WindowWidth (analysis can be done, + but we can not perform forecasting on the last sequence) +* window lentgh is 1 (impossible to use for forecasting) +* SSA analysis algorithm is configured to extract basis whose size is + equal to window length (impossible to use for forecasting; only basis + whose size is less than window length can be used). -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +Calling this function in degenerate cases returns following result: +* ForecastLen copies of the last value is returned for non-empty task with + large enough dataset, but with overcomplete basis (window width=1 or + basis size is equal to window width) +* zero trend with length=ForecastLen is returned for empty task + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is ever constructed). -- ALGLIB -- - Copyright 09.08.2012 by Bochkanov Sergey + Copyright 30.10.2017 by Bochkanov Sergey *************************************************************************/ -double mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints) +void ssaforecastavgsequence(const ssamodel &s, const real_1d_array &data, const ae_int_t datalen, const ae_int_t m, const ae_int_t forecastlen, const bool applysmoothing, real_1d_array &trend, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlprelclserrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaforecastavgsequence(const_cast(s.c_ptr()), const_cast(data.c_ptr()), datalen, m, forecastlen, applysmoothing, const_cast(trend.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function builds SSA basis and performs forecasting for a user- +specified sequence, returning value of trend. + +Forecasting is done in two stages: +* first, we extract trend from M last sliding windows of the sequence. + This stage is optional, you can turn it off if you pass data which + are already processed with SSA. 
Of course, you can turn it off even + for raw data, but it is not recommended - noise suppression is very + important for correct prediction. +* then, we apply LRR independently for M sliding windows +* average of M predictions is returned + +This function has following running time: +* O(NBasis*WindowWidth*M) for trend extraction phase +* O(WindowWidth*NTicks*M) for forecast phase + +NOTE: combination of several predictions results in lesser sensitivity to + noise, but it may produce undesirable discontinuities between last + point of the trend and first point of the prediction. The reason is + that last point of the trend is usually corrupted by noise, but + average value of several predictions is less sensitive to noise, + thus discontinuity appears. It is not a bug. + +INPUT PARAMETERS: + S - SSA model + Data - array[NTicks], data to forecast + DataLen - number of ticks in the data, DataLen>=1 + M - number of sliding windows to combine, M>=1. If + your dataset has less than M sliding windows, this + parameter will be silently reduced. + ForecastLen - number of ticks to predict, ForecastLen>=1 + ApplySmoothing - whether to apply smoothing trend extraction or not. + if you do not know what to specify, pass true. + +OUTPUT PARAMETERS: + Trend - array[ForecastLen], forecasted trend + + +CACHING/REUSE OF THE BASIS + +Caching/reuse of previous results is performed: +* first call performs full run of SSA; basis is stored in the cache +* subsequent calls reuse previously cached basis +* if you call any function which changes model properties (window length, + algorithm, dataset), internal basis will be invalidated. +* the only calls which do NOT invalidate basis are listed below: + a) ssasetwindow() with same window length + b) ssaappendpointandupdate() + c) ssaappendsequenceandupdate() + d) ssasetalgotopk...() with exactly same K + Calling these functions will result in reuse of previously found basis. + + +HANDLING OF DEGENERATE CASES + +Following degenerate cases may happen: +* dataset is empty (no analysis can be done) +* all sequences are shorter than the window length,no analysis can be done +* no algorithm is specified (no analysis can be done) +* data sequence is shorter than the WindowWidth (analysis can be done, + but we can not perform forecasting on the last sequence) +* window lentgh is 1 (impossible to use for forecasting) +* SSA analysis algorithm is configured to extract basis whose size is + equal to window length (impossible to use for forecasting; only basis + whose size is less than window length can be used). + +Calling this function in degenerate cases returns following result: +* ForecastLen copies of the last value is returned for non-empty task with + large enough dataset, but with overcomplete basis (window width=1 or + basis size is equal to window width) +* zero trend with length=ForecastLen is returned for empty task + +No analysis is performed in degenerate cases (we immediately return dummy +values, no basis is ever constructed). 
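A sketch of the averaged variant documented above; M=5 and the horizon are illustrative (M is silently reduced when the sequence has fewer sliding windows):

#include "dataanalysis.h"

void forecast_external_averaged(const alglib::ssamodel &s,
                                const alglib::real_1d_array &history,
                                alglib::real_1d_array &prediction)
{
    const alglib::ae_int_t m = 5;                  // sliding windows to combine
    const alglib::ae_int_t forecastlen = 10;       // illustrative horizon
    alglib::ssaforecastavgsequence(s, history, history.length(),
                                   m, forecastlen, true, prediction);
}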
+ + -- ALGLIB -- + Copyright 30.10.2017 by Bochkanov Sergey +*************************************************************************/ +#if !defined(AE_NO_EXCEPTIONS) +void ssaforecastavgsequence(const ssamodel &s, const real_1d_array &data, const ae_int_t m, const ae_int_t forecastlen, real_1d_array &trend, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + ae_int_t datalen; + bool applysmoothing; + + datalen = data.length(); + applysmoothing = true; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ssaforecastavgsequence(const_cast(s.c_ptr()), const_cast(data.c_ptr()), datalen, m, forecastlen, applysmoothing, const_cast(trend.c_ptr()), &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif +#endif +#if defined(AE_COMPILE_LINREG) || !defined(AE_PARTIAL_BUILD) +/************************************************************************* -double smp_mlprelclserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints) +*************************************************************************/ +_linearmodel_owner::_linearmodel_owner() { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - double result = alglib_impl::_pexec_mlprelclserrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); + if( p_struct!=NULL ) + { + alglib_impl::_linearmodel_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::linearmodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::linearmodel), &_state); + memset(p_struct, 0, sizeof(alglib_impl::linearmodel)); + alglib_impl::_linearmodel_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_linearmodel_owner::_linearmodel_owner(const _linearmodel_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); + if( p_struct!=NULL ) + { + alglib_impl::_linearmodel_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: linearmodel copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::linearmodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::linearmodel), &_state); + memset(p_struct, 0, sizeof(alglib_impl::linearmodel)); + alglib_impl::_linearmodel_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } 
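The owner boilerplate above backs the public linearmodel class (and, further below, lrreport). A hedged sketch of their typical use, assuming the lrbuild() and lrprocess() routines of this linreg unit and an illustrative five-point dataset:

#include <cstdio>
#include "dataanalysis.h"

int main()
{
    // Each row: NVars=1 input column followed by the target column.
    alglib::real_2d_array xy = "[[1,2.1],[2,3.9],[3,6.2],[4,7.8],[5,10.1]]";
    alglib::ae_int_t info;
    alglib::linearmodel lm;                        // copyable via the owner machinery above
    alglib::lrreport rep;

    alglib::lrbuild(xy, 5, 1, info, lm, rep);      // npoints=5, nvars=1
    if( info != 1 )
        return 1;                                  // build failed

    alglib::real_1d_array x = "[6]";
    printf("prediction at x=6: %.3f (rms error %.3f)\n",
           alglib::lrprocess(lm, x), rep.rmserror);
    return 0;
}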
-/************************************************************************* -Average cross-entropy (in bits per element) on the test set. - - -FOR USERS OF COMMERCIAL EDITION: +_linearmodel_owner& _linearmodel_owner::operator=(const _linearmodel_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: linearmodel assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: linearmodel assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_linearmodel_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::linearmodel)); + alglib_impl::_linearmodel_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +_linearmodel_owner::~_linearmodel_owner() +{ + if( p_struct!=NULL ) + { + alglib_impl::_linearmodel_destroy(p_struct); + ae_free(p_struct); + } +} +alglib_impl::linearmodel* _linearmodel_owner::c_ptr() +{ + return p_struct; +} -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. +alglib_impl::linearmodel* _linearmodel_owner::c_ptr() const +{ + return const_cast(p_struct); +} +linearmodel::linearmodel() : _linearmodel_owner() +{ +} -RESULT: -CrossEntropy/(NPoints*LN(2)). -Zero if network solves regression task. +linearmodel::linearmodel(const linearmodel &rhs):_linearmodel_owner(rhs) +{ +} -DATASET FORMAT: +linearmodel& linearmodel::operator=(const linearmodel &rhs) +{ + if( this==&rhs ) + return *this; + _linearmodel_owner::operator=(rhs); + return *this; +} -This function uses two different dataset formats - one for regression -networks, another one for classification networks. 
+linearmodel::~linearmodel() +{ +} -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +/************************************************************************* +LRReport structure contains additional information about linear model: +* C - covariation matrix, array[0..NVars,0..NVars]. + C[i,j] = Cov(A[i],A[j]) +* RMSError - root mean square error on a training set +* AvgError - average error on a training set +* AvgRelError - average relative error on a training set (excluding + observations with zero function value). +* CVRMSError - leave-one-out cross-validation estimate of + generalization error. Calculated using fast algorithm + with O(NVars*NPoints) complexity. +* CVAvgError - cross-validation estimate of average error +* CVAvgRelError - cross-validation estimate of average relative error - -- ALGLIB -- - Copyright 08.01.2009 by Bochkanov Sergey +All other fields of the structure are intended for internal use and should +not be used outside ALGLIB. *************************************************************************/ -double mlpavgce(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) +_lrreport_owner::_lrreport_owner() { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - double result = alglib_impl::mlpavgce(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); + if( p_struct!=NULL ) + { + alglib_impl::_lrreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::lrreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::lrreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::lrreport)); + alglib_impl::_lrreport_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_lrreport_owner::_lrreport_owner(const _lrreport_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); + if( p_struct!=NULL ) + { + alglib_impl::_lrreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: lrreport copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::lrreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::lrreport), &_state); 
+ memset(p_struct, 0, sizeof(alglib_impl::lrreport)); + alglib_impl::_lrreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } - -double smp_mlpavgce(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) +_lrreport_owner& _lrreport_owner::operator=(const _lrreport_owner &rhs) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - double result = alglib_impl::_pexec_mlpavgce(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: lrreport assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: lrreport assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_lrreport_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::lrreport)); + alglib_impl::_lrreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + +_lrreport_owner::~_lrreport_owner() +{ + if( p_struct!=NULL ) { - throw ap_error(_alglib_env_state.error_msg); + alglib_impl::_lrreport_destroy(p_struct); + ae_free(p_struct); } } -/************************************************************************* -Average cross-entropy (in bits per element) on the test set given by -sparse matrix. +alglib_impl::lrreport* _lrreport_owner::c_ptr() +{ + return p_struct; +} +alglib_impl::lrreport* _lrreport_owner::c_ptr() const +{ + return const_cast(p_struct); +} +lrreport::lrreport() : _lrreport_owner() ,c(&p_struct->c),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror),cvrmserror(p_struct->cvrmserror),cvavgerror(p_struct->cvavgerror),cvavgrelerror(p_struct->cvavgrelerror),ncvdefects(p_struct->ncvdefects),cvdefects(&p_struct->cvdefects) +{ +} -FOR USERS OF COMMERCIAL EDITION: +lrreport::lrreport(const lrreport &rhs):_lrreport_owner(rhs) ,c(&p_struct->c),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror),cvrmserror(p_struct->cvrmserror),cvavgerror(p_struct->cvavgerror),cvavgrelerror(p_struct->cvavgrelerror),ncvdefects(p_struct->ncvdefects),cvdefects(&p_struct->cvdefects) +{ +} - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! 
This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +lrreport& lrreport::operator=(const lrreport &rhs) +{ + if( this==&rhs ) + return *this; + _lrreport_owner::operator=(rhs); + return *this; +} +lrreport::~lrreport() +{ +} -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. - NPoints - points count, >=0. +/************************************************************************* +Linear regression -RESULT: -CrossEntropy/(NPoints*LN(2)). -Zero if network solves regression task. +Subroutine builds model: -DATASET FORMAT: + Y = A(0)*X[0] + ... + A(N-1)*X[N-1] + A(N) -This function uses two different dataset formats - one for regression -networks, another one for classification networks. +and model found in ALGLIB format, covariation matrix, training set errors +(rms, average, average relative) and leave-one-out cross-validation +estimate of the generalization error. CV estimate calculated using fast +algorithm with O(NPoints*NVars) complexity. -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +When covariation matrix is calculated standard deviations of function +values are assumed to be equal to RMS error on the training set. + +INPUT PARAMETERS: + XY - training set, array [0..NPoints-1,0..NVars]: + * NVars columns - independent variables + * last column - dependent variable + NPoints - training set size, NPoints>NVars+1 + NVars - number of independent variables + +OUTPUT PARAMETERS: + Info - return code: + * -255, in case of unknown internal error + * -4, if internal SVD subroutine haven't converged + * -1, if incorrect parameters was passed (NPoints(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::lrbuild(const_cast(xy.c_ptr()), npoints, nvars, &info, const_cast(lm.c_ptr()), const_cast(ar.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +Linear regression + +Variant of LRBuild which uses vector of standatd deviations (errors in +function values). + +INPUT PARAMETERS: + XY - training set, array [0..NPoints-1,0..NVars]: + * NVars columns - independent variables + * last column - dependent variable + S - standard deviations (errors in function values) + array[0..NPoints-1], S[i]>0. 
+ NPoints - training set size, NPoints>NVars+1 + NVars - number of independent variables + +OUTPUT PARAMETERS: + Info - return code: + * -255, in case of unknown internal error + * -4, if internal SVD subroutine haven't converged + * -1, if incorrect parameters was passed (NPoints(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::lrbuilds(const_cast(xy.c_ptr()), const_cast(s.c_ptr()), npoints, nvars, &info, const_cast(lm.c_ptr()), const_cast(ar.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -RMS error on the test set given. +Like LRBuildS, but builds model + Y = A(0)*X[0] + ... + A(N-1)*X[N-1] -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - - -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. - -RESULT: -Root mean square error. Its meaning for regression task is obvious. As for -classification task, RMS error means error when estimating posterior -probabilities. - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. - -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs - -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +i.e. with zero constant term. 
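As a quick illustration of the lrbuild wrapper and the lrreport fields documented above, the following self-contained sketch fits a two-variable model and reads back the training and cross-validation errors. The dataset is made up for the example, and the check against Info==1 follows ALGLIB's usual success convention rather than anything visible in this hunk.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"
using namespace alglib;

int main()
{
    // Eight samples, two independent variables, dependent variable in the last
    // column (here y = x0 + 2*x1, so the fit is exact) - illustrative values.
    real_2d_array xy = "[[1,2,5],[2,1,4],[3,3,9],[4,2,8],[5,5,15],[6,1,8],[7,4,15],[8,2,12]]";

    ae_int_t info;
    linearmodel lm;
    lrreport rep;
    lrbuild(xy, 8, 2, info, lm, rep);   // NPoints=8, NVars=2
    if( info!=1 )                       // 1 = success in ALGLIB's convention
    {
        printf("lrbuild failed, info=%d\n", (int)info);
        return 1;
    }

    // Training-set and leave-one-out cross-validation errors from lrreport.
    printf("rms=%.6f avg=%.6f cv-rms=%.6f\n",
           rep.rmserror, rep.avgerror, rep.cvrmserror);

    // lrbuildz/lrbuildzs build the same kind of model without the constant term.
    return 0;
}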
-- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 30.10.2008 by Bochkanov Sergey *************************************************************************/ -double mlprmserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) +void lrbuildzs(const real_2d_array &xy, const real_1d_array &s, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlprmserror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::lrbuildzs(const_cast(xy.c_ptr()), const_cast(s.c_ptr()), npoints, nvars, &info, const_cast(lm.c_ptr()), const_cast(ar.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +Like LRBuild but builds model + + Y = A(0)*X[0] + ... + A(N-1)*X[N-1] + +i.e. with zero constant term. -double smp_mlprmserror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) + -- ALGLIB -- + Copyright 30.10.2008 by Bochkanov Sergey +*************************************************************************/ +void lrbuildz(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, linearmodel &lm, lrreport &ar, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::_pexec_mlprmserror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::lrbuildz(const_cast(xy.c_ptr()), npoints, nvars, &info, const_cast(lm.c_ptr()), const_cast(ar.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -RMS error on the test set given by sparse matrix. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! 
In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - +Unpacks coefficients of linear model. INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. - NPoints - points count, >=0. - -RESULT: -Root mean square error. Its meaning for regression task is obvious. As for -classification task, RMS error means error when estimating posterior -probabilities. - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. - -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs + LM - linear model in ALGLIB format -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +OUTPUT PARAMETERS: + V - coefficients, array[0..NVars] + constant term (intercept) is stored in the V[NVars]. 
+ NVars - number of independent variables (one less than number + of coefficients) -- ALGLIB -- - Copyright 09.08.2012 by Bochkanov Sergey + Copyright 30.08.2008 by Bochkanov Sergey *************************************************************************/ -double mlprmserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints) +void lrunpack(const linearmodel &lm, real_1d_array &v, ae_int_t &nvars, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlprmserrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::lrunpack(const_cast(lm.c_ptr()), const_cast(v.c_ptr()), &nvars, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +"Packs" coefficients and creates linear model in ALGLIB format (LRUnpack +reversed). + +INPUT PARAMETERS: + V - coefficients, array[0..NVars] + NVars - number of independent variables + +OUTPUT PAREMETERS: + LM - linear model. -double smp_mlprmserrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints) + -- ALGLIB -- + Copyright 30.08.2008 by Bochkanov Sergey +*************************************************************************/ +void lrpack(const real_1d_array &v, const ae_int_t nvars, linearmodel &lm, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::_pexec_mlprmserrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::lrpack(const_cast(v.c_ptr()), nvars, const_cast(lm.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Average absolute error on the test set. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! 
* use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - +Procesing INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. - -RESULT: -Its meaning for regression task is obvious. As for classification task, it -means average error when estimating posterior probabilities. - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. - -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs + LM - linear model + X - input vector, array[0..NVars-1]. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +Result: + value of linear model regression estimate -- ALGLIB -- - Copyright 11.03.2008 by Bochkanov Sergey + Copyright 03.09.2008 by Bochkanov Sergey *************************************************************************/ -double mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) +double lrprocess(const linearmodel &lm, const real_1d_array &x, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::mlpavgerror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::lrprocess(const_cast(lm.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +/************************************************************************* +RMS error on the test set + +INPUT PARAMETERS: + LM - linear model + XY - test set + NPoints - test set size + +RESULT: + root mean square error. 
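The lrpack, lrunpack and lrprocess wrappers above are easiest to see together. The sketch below packs a coefficient vector into a linearmodel, evaluates it, and unpacks it again; the coefficient values and the evaluation point are illustrative.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"
using namespace alglib;

int main()
{
    // Coefficients of y = 1.0*x0 + 2.0*x1 + 0.5; the intercept occupies V[NVars].
    real_1d_array v = "[1.0,2.0,0.5]";
    linearmodel lm;
    lrpack(v, 2, lm);                   // NVars=2, so v has NVars+1 entries

    // Evaluate the model at x=(2,3): expected 1*2 + 2*3 + 0.5 = 8.5
    real_1d_array x = "[2.0,3.0]";
    printf("lrprocess = %.2f\n", lrprocess(lm, x));

    // Recover the coefficients; the intercept comes back in c[nvars].
    real_1d_array c;
    ae_int_t nvars;
    lrunpack(lm, c, nvars);
    printf("nvars=%d, coefficients=%s\n", (int)nvars, c.tostring(2).c_str());
    return 0;
}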
-double smp_mlpavgerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) + -- ALGLIB -- + Copyright 30.08.2008 by Bochkanov Sergey +*************************************************************************/ +double lrrmserror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::_pexec_mlpavgerror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::lrrmserror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Average absolute error on the test set given by sparse matrix. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - +Average error on the test set INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. - NPoints - points count, >=0. + LM - linear model + XY - test set + NPoints - test set size RESULT: -Its meaning for regression task is obvious. As for classification task, it -means average error when estimating posterior probabilities. - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. 
- -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs - -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). + average error. -- ALGLIB -- - Copyright 09.08.2012 by Bochkanov Sergey + Copyright 30.08.2008 by Bochkanov Sergey *************************************************************************/ -double mlpavgerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints) +double lravgerror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::mlpavgerrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::lravgerror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +/************************************************************************* +RMS error on the test set + +INPUT PARAMETERS: + LM - linear model + XY - test set + NPoints - test set size + +RESULT: + average relative error. 
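To make the error metrics above concrete, this sketch evaluates a hand-packed model on a tiny test set. It assumes the test matrix uses the same layout as the training set (independent variables first, observed value in the last column); the numbers are illustrative.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"
using namespace alglib;

int main()
{
    // Model y = x0 + x1 (zero intercept), packed directly from its coefficients.
    real_1d_array v = "[1.0,1.0,0.0]";
    linearmodel lm;
    lrpack(v, 2, lm);

    // Three test points; the last column stores the observed value.
    // The model itself would predict 2, 4 and 4 for these rows.
    real_2d_array xy = "[[1,1,2.5],[2,2,4.0],[3,1,3.5]]";

    printf("rms     = %.4f\n", lrrmserror(lm, xy, 3));
    printf("avg     = %.4f\n", lravgerror(lm, xy, 3));
    printf("avg rel = %.4f\n", lravgrelerror(lm, xy, 3));
    return 0;
}

Note that the header comment for lravgrelerror in this hunk still reads "RMS error on the test set"; judging by its RESULT line and the function name, the value reported is the average relative error.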
-double smp_mlpavgerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints) + -- ALGLIB -- + Copyright 30.08.2008 by Bochkanov Sergey +*************************************************************************/ +double lravgrelerror(const linearmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::_pexec_mlpavgerrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::lravgrelerror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif +#if defined(AE_COMPILE_FILTERS) || !defined(AE_PARTIAL_BUILD) /************************************************************************* -Average relative error on the test set. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +Filters: simple moving averages (unsymmetric). +This filter replaces array by results of SMA(K) filter. SMA(K) is defined +as filter which averages at most K previous points (previous - not points +AROUND central point) - or less, in case of the first K-1 points. INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - NPoints - points count. - -RESULT: -Its meaning for regression task is obvious. As for classification task, it -means average relative error when estimating posterior probability of -belonging to the correct class. + X - array[N], array to process. It can be larger than N, + in this case only first N points are processed. + N - points count, N>=0 + K - K>=1 (K can be larger than N , such cases will be + correctly handled). Window width. K=1 corresponds to + identity transformation (nothing changes). 
-DATASET FORMAT: +OUTPUT PARAMETERS: + X - array, whose first N elements were processed with SMA(K) -This function uses two different dataset formats - one for regression -networks, another one for classification networks. +NOTE 1: this function uses efficient in-place algorithm which does not + allocate temporary arrays. -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +NOTE 2: this algorithm makes only one pass through array and uses running + sum to speed-up calculation of the averages. Additional measures + are taken to ensure that running sum on a long sequence of zero + elements will be correctly reset to zero even in the presence of + round-off error. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +NOTE 3: this is unsymmetric version of the algorithm, which does NOT + averages points after the current one. Only X[i], X[i-1], ... are + used when calculating new value of X[i]. We should also note that + this algorithm uses BOTH previous points and current one, i.e. + new value of X[i] depends on BOTH previous point and X[i] itself. -- ALGLIB -- - Copyright 11.03.2008 by Bochkanov Sergey + Copyright 25.10.2011 by Bochkanov Sergey *************************************************************************/ -double mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlpavgrelerror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - - -double smp_mlpavgrelerror(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints) +void filtersma(real_1d_array &x, const ae_int_t n, const ae_int_t k, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::_pexec_mlpavgrelerror(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::filtersma(const_cast(x.c_ptr()), n, k, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Average relative error on the test set given by sparse matrix. - - -FOR USERS OF COMMERCIAL EDITION: - - ! 
Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +Filters: simple moving averages (unsymmetric). +This filter replaces array by results of SMA(K) filter. SMA(K) is defined +as filter which averages at most K previous points (previous - not points +AROUND central point) - or less, in case of the first K-1 points. INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. - NPoints - points count, >=0. - -RESULT: -Its meaning for regression task is obvious. As for classification task, it -means average relative error when estimating posterior probability of -belonging to the correct class. + X - array[N], array to process. It can be larger than N, + in this case only first N points are processed. + N - points count, N>=0 + K - K>=1 (K can be larger than N , such cases will be + correctly handled). Window width. K=1 corresponds to + identity transformation (nothing changes). -DATASET FORMAT: +OUTPUT PARAMETERS: + X - array, whose first N elements were processed with SMA(K) -This function uses two different dataset formats - one for regression -networks, another one for classification networks. +NOTE 1: this function uses efficient in-place algorithm which does not + allocate temporary arrays. -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs +NOTE 2: this algorithm makes only one pass through array and uses running + sum to speed-up calculation of the averages. Additional measures + are taken to ensure that running sum on a long sequence of zero + elements will be correctly reset to zero even in the presence of + round-off error. -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). +NOTE 3: this is unsymmetric version of the algorithm, which does NOT + averages points after the current one. Only X[i], X[i-1], ... are + used when calculating new value of X[i]. We should also note that + this algorithm uses BOTH previous points and current one, i.e. 
+ new value of X[i] depends on BOTH previous point and X[i] itself. -- ALGLIB -- - Copyright 09.08.2012 by Bochkanov Sergey + Copyright 25.10.2011 by Bochkanov Sergey *************************************************************************/ -double mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints) +#if !defined(AE_NO_EXCEPTIONS) +void filtersma(real_1d_array &x, const ae_int_t k, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlpavgrelerrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + ae_int_t n; -double smp_mlpavgrelerrorsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t npoints) -{ - alglib_impl::ae_state _alglib_env_state; + n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::_pexec_mlpavgrelerrorsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::filtersma(const_cast(x.c_ptr()), n, k, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif /************************************************************************* -Gradient calculation +Filters: exponential moving averages. + +This filter replaces array by results of EMA(alpha) filter. EMA(alpha) is +defined as filter which replaces X[] by S[]: + S[0] = X[0] + S[t] = alpha*X[t] + (1-alpha)*S[t-1] INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - X - input vector, length of array must be at least NIn - DesiredY- desired outputs, length of array must be at least NOut - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. + X - array[N], array to process. It can be larger than N, + in this case only first N points are processed. 
+ N - points count, N>=0 + alpha - 0(network.c_ptr()), const_cast(x.c_ptr()), const_cast(desiredy.c_ptr()), &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::filterema(const_cast(x.c_ptr()), n, alpha, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Gradient calculation (natural error function is used) +Filters: exponential moving averages. + +This filter replaces array by results of EMA(alpha) filter. EMA(alpha) is +defined as filter which replaces X[] by S[]: + S[0] = X[0] + S[t] = alpha*X[t] + (1-alpha)*S[t-1] INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - X - input vector, length of array must be at least NIn - DesiredY- desired outputs, length of array must be at least NOut - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. + X - array[N], array to process. It can be larger than N, + in this case only first N points are processed. + N - points count, N>=0 + alpha - 0(network.c_ptr()), const_cast(x.c_ptr()), const_cast(desiredy.c_ptr()), &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::filterema(const_cast(x.c_ptr()), n, alpha, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif /************************************************************************* -Batch gradient calculation for a set of inputs/outputs +Filters: linear regression moving averages. +This filter replaces array by results of LRMA(K) filter. -FOR USERS OF COMMERCIAL EDITION: +LRMA(K) is defined as filter which, for each data point, builds linear +regression model using K prevous points (point itself is included in +these K points) and calculates value of this linear model at the point in +question. - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! 
This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +INPUT PARAMETERS: + X - array[N], array to process. It can be larger than N, + in this case only first N points are processed. + N - points count, N>=0 + K - K>=1 (K can be larger than N , such cases will be + correctly handled). Window width. K=1 corresponds to + identity transformation (nothing changes). +OUTPUT PARAMETERS: + X - array, whose first N elements were processed with SMA(K) -INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset in dense format; one sample = one row: - * first NIn columns contain inputs, - * for regression problem, next NOut columns store - desired outputs. - * for classification problem, next column (just one!) - stores class number. - SSize - number of elements in XY - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. +NOTE 1: this function uses efficient in-place algorithm which does not + allocate temporary arrays. -OUTPUT PARAMETERS: - E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) - Grad - gradient of E with respect to weights of network, array[WCount] +NOTE 2: this algorithm makes only one pass through array and uses running + sum to speed-up calculation of the averages. Additional measures + are taken to ensure that running sum on a long sequence of zero + elements will be correctly reset to zero even in the presence of + round-off error. + +NOTE 3: this is unsymmetric version of the algorithm, which does NOT + averages points after the current one. Only X[i], X[i-1], ... are + used when calculating new value of X[i]. We should also note that + this algorithm uses BOTH previous points and current one, i.e. + new value of X[i] depends on BOTH previous point and X[i] itself. 
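Since filtersma, filterema and filterlrma all share the same in-place calling convention, one short sketch covers the three of them. The sample series, window widths and smoothing factor are illustrative, and the overloads that omit N simply take the length from the array, as the wrappers in this hunk do.

#include "stdafx.h"
#include <stdio.h>
#include "dataanalysis.h"
using namespace alglib;

int main()
{
    // Same sample series for each filter; every call modifies its array in place.
    real_1d_array a = "[5,6,7,8,7,9,10,8,9,11]";
    real_1d_array b = a;
    real_1d_array c = a;

    filtersma(a, 3);        // unsymmetric SMA over at most 3 previous points
    filterema(b, 0.5);      // EMA with smoothing factor alpha=0.5
    filterlrma(c, 3);       // linear regression MA over 3-point windows

    printf("SMA(3):  %s\n", a.tostring(2).c_str());
    printf("EMA(.5): %s\n", b.tostring(2).c_str());
    printf("LRMA(3): %s\n", c.tostring(2).c_str());
    return 0;
}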
-- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 25.10.2011 by Bochkanov Sergey *************************************************************************/ -void mlpgradbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mlpgradbatch(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - - -void smp_mlpgradbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad) +void filterlrma(real_1d_array &x, const ae_int_t n, const ae_int_t k, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::_pexec_mlpgradbatch(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::filterlrma(const_cast(x.c_ptr()), n, k, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Batch gradient calculation for a set of inputs/outputs given by sparse -matrices - - -FOR USERS OF COMMERCIAL EDITION: +Filters: linear regression moving averages. - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +This filter replaces array by results of LRMA(K) filter. +LRMA(K) is defined as filter which, for each data point, builds linear +regression model using K prevous points (point itself is included in +these K points) and calculates value of this linear model at the point in +question. 
INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset in sparse format; one sample = one row: - * MATRIX MUST BE STORED IN CRS FORMAT - * first NIn columns contain inputs. - * for regression problem, next NOut columns store - desired outputs. - * for classification problem, next column (just one!) - stores class number. - SSize - number of elements in XY - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. + X - array[N], array to process. It can be larger than N, + in this case only first N points are processed. + N - points count, N>=0 + K - K>=1 (K can be larger than N , such cases will be + correctly handled). Window width. K=1 corresponds to + identity transformation (nothing changes). OUTPUT PARAMETERS: - E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) - Grad - gradient of E with respect to weights of network, array[WCount] + X - array, whose first N elements were processed with SMA(K) + +NOTE 1: this function uses efficient in-place algorithm which does not + allocate temporary arrays. + +NOTE 2: this algorithm makes only one pass through array and uses running + sum to speed-up calculation of the averages. Additional measures + are taken to ensure that running sum on a long sequence of zero + elements will be correctly reset to zero even in the presence of + round-off error. + +NOTE 3: this is unsymmetric version of the algorithm, which does NOT + averages points after the current one. Only X[i], X[i-1], ... are + used when calculating new value of X[i]. We should also note that + this algorithm uses BOTH previous points and current one, i.e. + new value of X[i] depends on BOTH previous point and X[i] itself. 
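Example (a sketch, not part of the upstream patch): the short overload defined just below reads N from x.length(), so it is only compiled in exception-enabled builds (AE_NO_EXCEPTIONS not defined). The header name is an assumption.

#include "dataanalysis.h"   // assumed header; not part of this hunk

void smooth(alglib::real_1d_array &x)
{
    // N is taken from x.length() inside the wrapper below
    alglib::filterlrma(x, 3);    // window width K=3
}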
-- ALGLIB -- - Copyright 26.07.2012 by Bochkanov Sergey + Copyright 25.10.2011 by Bochkanov Sergey *************************************************************************/ -void mlpgradbatchsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t ssize, double &e, real_1d_array &grad) +#if !defined(AE_NO_EXCEPTIONS) +void filterlrma(real_1d_array &x, const ae_int_t k, const xparams _xparams) { - alglib_impl::ae_state _alglib_env_state; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + ae_int_t n; + + n = x.length(); alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::filterlrma(const_cast(x.c_ptr()), n, k, &_alglib_env_state); + + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} +#endif +#endif + +#if defined(AE_COMPILE_LOGIT) || !defined(AE_PARTIAL_BUILD) +/************************************************************************* + +*************************************************************************/ +_logitmodel_owner::_logitmodel_owner() +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::mlpgradbatchsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); + if( p_struct!=NULL ) + { + alglib_impl::_logitmodel_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::logitmodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::logitmodel), &_state); + memset(p_struct, 0, sizeof(alglib_impl::logitmodel)); + alglib_impl::_logitmodel_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } - -void smp_mlpgradbatchsparse(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t ssize, double &e, real_1d_array &grad) +_logitmodel_owner::_logitmodel_owner(const _logitmodel_owner &rhs) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::_pexec_mlpgradbatchsparse(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); + if( p_struct!=NULL ) + { + alglib_impl::_logitmodel_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: logitmodel copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::logitmodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::logitmodel), 
&_state); + memset(p_struct, 0, sizeof(alglib_impl::logitmodel)); + alglib_impl::_logitmodel_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} + +_logitmodel_owner& _logitmodel_owner::operator=(const _logitmodel_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: logitmodel assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: logitmodel assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_logitmodel_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::logitmodel)); + alglib_impl::_logitmodel_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; } -/************************************************************************* -Batch gradient calculation for a subset of dataset +_logitmodel_owner::~_logitmodel_owner() +{ + if( p_struct!=NULL ) + { + alglib_impl::_logitmodel_destroy(p_struct); + ae_free(p_struct); + } +} +alglib_impl::logitmodel* _logitmodel_owner::c_ptr() +{ + return p_struct; +} -FOR USERS OF COMMERCIAL EDITION: +alglib_impl::logitmodel* _logitmodel_owner::c_ptr() const +{ + return const_cast(p_struct); +} +logitmodel::logitmodel() : _logitmodel_owner() +{ +} - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. +logitmodel::logitmodel(const logitmodel &rhs):_logitmodel_owner(rhs) +{ +} +logitmodel& logitmodel::operator=(const logitmodel &rhs) +{ + if( this==&rhs ) + return *this; + _logitmodel_owner::operator=(rhs); + return *this; +} -INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset in dense format; one sample = one row: - * first NIn columns contain inputs, - * for regression problem, next NOut columns store - desired outputs. - * for classification problem, next column (just one!) - stores class number. - SetSize - real size of XY, SetSize>=0; - Idx - subset of SubsetSize elements, array[SubsetSize]: - * Idx[I] stores row index in the original dataset which is - given by XY. 
Gradient is calculated with respect to rows - whose indexes are stored in Idx[]. - * Idx[] must store correct indexes; this function throws - an exception in case incorrect index (less than 0 or - larger than rows(XY)) is given - * Idx[] may store indexes in any order and even with - repetitions. - SubsetSize- number of elements in Idx[] array: - * positive value means that subset given by Idx[] is processed - * zero value results in zero gradient - * negative value means that full dataset is processed - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. +logitmodel::~logitmodel() +{ +} -OUTPUT PARAMETERS: - E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) - Grad - gradient of E with respect to weights of network, - array[WCount] - -- ALGLIB -- - Copyright 26.07.2012 by Bochkanov Sergey +/************************************************************************* +MNLReport structure contains information about training process: +* NGrad - number of gradient calculations +* NHess - number of Hessian calculations *************************************************************************/ -void mlpgradbatchsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad) +_mnlreport_owner::_mnlreport_owner() { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::mlpgradbatchsubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(idx.c_ptr()), subsetsize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); + if( p_struct!=NULL ) + { + alglib_impl::_mnlreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::mnlreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mnlreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mnlreport)); + alglib_impl::_mnlreport_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_mnlreport_owner::_mnlreport_owner(const _mnlreport_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); + if( p_struct!=NULL ) + { + alglib_impl::_mnlreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mnlreport copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::mnlreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mnlreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mnlreport)); + alglib_impl::_mnlreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, 
ae_false); + ae_state_clear(&_state); } - -void smp_mlpgradbatchsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad) +_mnlreport_owner& _mnlreport_owner::operator=(const _mnlreport_owner &rhs) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::_pexec_mlpgradbatchsubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(idx.c_ptr()), subsetsize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: mnlreport assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mnlreport assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_mnlreport_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::mnlreport)); + alglib_impl::_mnlreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + +_mnlreport_owner::~_mnlreport_owner() +{ + if( p_struct!=NULL ) { - throw ap_error(_alglib_env_state.error_msg); + alglib_impl::_mnlreport_destroy(p_struct); + ae_free(p_struct); } } -/************************************************************************* -Batch gradient calculation for a set of inputs/outputs for a subset of -dataset given by set of indexes. +alglib_impl::mnlreport* _mnlreport_owner::c_ptr() +{ + return p_struct; +} +alglib_impl::mnlreport* _mnlreport_owner::c_ptr() const +{ + return const_cast(p_struct); +} +mnlreport::mnlreport() : _mnlreport_owner() ,ngrad(p_struct->ngrad),nhess(p_struct->nhess) +{ +} -FOR USERS OF COMMERCIAL EDITION: +mnlreport::mnlreport(const mnlreport &rhs):_mnlreport_owner(rhs) ,ngrad(p_struct->ngrad),nhess(p_struct->nhess) +{ +} - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. 
+mnlreport& mnlreport::operator=(const mnlreport &rhs) +{ + if( this==&rhs ) + return *this; + _mnlreport_owner::operator=(rhs); + return *this; +} + +mnlreport::~mnlreport() +{ +} +/************************************************************************* +This subroutine trains logit model. INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset in sparse format; one sample = one row: - * MATRIX MUST BE STORED IN CRS FORMAT - * first NIn columns contain inputs, - * for regression problem, next NOut columns store - desired outputs. - * for classification problem, next column (just one!) - stores class number. - SetSize - real size of XY, SetSize>=0; - Idx - subset of SubsetSize elements, array[SubsetSize]: - * Idx[I] stores row index in the original dataset which is - given by XY. Gradient is calculated with respect to rows - whose indexes are stored in Idx[]. - * Idx[] must store correct indexes; this function throws - an exception in case incorrect index (less than 0 or - larger than rows(XY)) is given - * Idx[] may store indexes in any order and even with - repetitions. - SubsetSize- number of elements in Idx[] array: - * positive value means that subset given by Idx[] is processed - * zero value results in zero gradient - * negative value means that full dataset is processed - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. + XY - training set, array[0..NPoints-1,0..NVars] + First NVars columns store values of independent + variables, next column stores number of class (from 0 + to NClasses-1) which dataset element belongs to. Fractional + values are rounded to nearest integer. + NPoints - training set size, NPoints>=1 + NVars - number of independent variables, NVars>=1 + NClasses - number of classes, NClasses>=2 OUTPUT PARAMETERS: - E - error function, SUM(sqr(y[i]-desiredy[i])/2,i) - Grad - gradient of E with respect to weights of network, - array[WCount] - -NOTE: when SubsetSize<0 is used full dataset by call MLPGradBatchSparse - function. + Info - return code: + * -2, if there is a point with class number + outside of [0..NClasses-1]. 
+ * -1, if incorrect parameters was passed + (NPoints(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(idx.c_ptr()), subsetsize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - - -void smp_mlpgradbatchsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &idx, const ae_int_t subsetsize, double &e, real_1d_array &grad) +void mnltrainh(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, ae_int_t &info, logitmodel &lm, mnlreport &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::_pexec_mlpgradbatchsparsesubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(idx.c_ptr()), subsetsize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mnltrainh(const_cast(xy.c_ptr()), npoints, nvars, nclasses, &info, const_cast(lm.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Batch gradient calculation for a set of inputs/outputs -(natural error function is used) +Procesing INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - set of inputs/outputs; one sample = one row; - first NIn columns contain inputs, - next NOut columns - desired outputs. - SSize - number of elements in XY - Grad - possibly preallocated array. If size of array is smaller - than WCount, it will be reallocated. It is recommended to - reuse previously allocated array to reduce allocation - overhead. + LM - logit model, passed by non-constant reference + (some fields of structure are used as temporaries + when calculating model output). + X - input vector, array[0..NVars-1]. + Y - (possibly) preallocated buffer; if size of Y is less than + NClasses, it will be reallocated.If it is large enough, it + is NOT reallocated, so we can save some time on reallocation. OUTPUT PARAMETERS: - E - error function, sum-of-squares for regression networks, - cross-entropy for classification networks. - Grad - gradient of E with respect to weights of network, array[WCount] + Y - result, array[0..NClasses-1] + Vector of posterior probabilities for classification task. 
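Example (a sketch of the train/predict cycle documented above, not part of the upstream patch): the dataset values are illustrative, and the header name plus the defaulted trailing xparams argument are assumptions taken from the stock ALGLIB 3.16 distribution.

#include <cstdio>
#include "dataanalysis.h"   // assumed header for the logit (mnl*) unit

int main()
{
    // 4 samples, 2 independent variables, last column holds the class label (0 or 1)
    alglib::real_2d_array xy = "[[0.1,0.2,0],[0.2,0.1,0],[0.8,0.9,1],[0.9,0.8,1]]";
    alglib::logitmodel lm;
    alglib::mnlreport rep;
    alglib::ae_int_t info;

    alglib::mnltrainh(xy, 4, 2, 2, info, lm, rep);   // info=1 on success, negative codes as listed above
    if( info!=1 )
        return 1;

    alglib::real_1d_array x = "[0.5,0.5]";
    alglib::real_1d_array y;
    alglib::mnlprocess(lm, x, y);                    // y[i] = posterior probability of class i
    std::printf("P(0)=%.3f  P(1)=%.3f\n", y[0], y[1]);
    return 0;
}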
-- ALGLIB -- - Copyright 04.11.2007 by Bochkanov Sergey + Copyright 10.09.2008 by Bochkanov Sergey *************************************************************************/ -void mlpgradnbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad) +void mnlprocess(const logitmodel &lm, const real_1d_array &x, real_1d_array &y, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpgradnbatch(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mnlprocess(const_cast(lm.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Batch Hessian calculation (natural error function) using R-algorithm. -Internal subroutine. +'interactive' variant of MNLProcess for languages like Python which +support constructs like "Y = MNLProcess(LM,X)" and interactive mode of the +interpreter - -- ALGLIB -- - Copyright 26.01.2008 by Bochkanov Sergey. +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. - Hessian calculation based on R-algorithm described in - "Fast Exact Multiplication by the Hessian", - B. A. Pearlmutter, - Neural Computation, 1994. + -- ALGLIB -- + Copyright 10.09.2008 by Bochkanov Sergey *************************************************************************/ -void mlphessiannbatch(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t ssize, double &e, real_1d_array &grad, real_2d_array &h) +void mnlprocessi(const logitmodel &lm, const real_1d_array &x, real_1d_array &y, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlphessiannbatch(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), const_cast(h.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mnlprocessi(const_cast(lm.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Batch Hessian calculation using R-algorithm. -Internal subroutine. 
+Unpacks coefficients of logit model. Logit model have form: - -- ALGLIB -- - Copyright 26.01.2008 by Bochkanov Sergey. + P(class=i) = S(i) / (S(0) + S(1) + ... +S(M-1)) + S(i) = Exp(A[i,0]*X[0] + ... + A[i,N-1]*X[N-1] + A[i,N]), when i(network.c_ptr()), const_cast(xy.c_ptr()), ssize, &e, const_cast(grad.c_ptr()), const_cast(h.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mnlunpack(const_cast(lm.c_ptr()), const_cast(a.c_ptr()), &nvars, &nclasses, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* -Calculation of all types of errors on subset of dataset. - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - +"Packs" coefficients and creates logit model in ALGLIB format (MNLUnpack +reversed). INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset; one sample = one row; - first NIn columns contain inputs, - next NOut columns - desired outputs. - SetSize - real size of XY, SetSize>=0; - Subset - subset of SubsetSize elements, array[SubsetSize]; - SubsetSize- number of elements in Subset[] array: - * if SubsetSize>0, rows of XY with indices Subset[0]... - ...Subset[SubsetSize-1] are processed - * if SubsetSize=0, zeros are returned - * if SubsetSize<0, entire dataset is processed; Subset[] - array is ignored in this case. + A - model (see MNLUnpack) + NVars - number of independent variables + NClasses - number of classes OUTPUT PARAMETERS: - Rep - it contains all type of errors. + LM - logit model. 
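Example (a sketch, not part of the upstream patch): MNLUnpack and MNLPack are inverses, so a trained model can be round-tripped through its coefficient matrix. It assumes an already trained alglib::logitmodel and the usual defaulted xparams argument.

#include "dataanalysis.h"   // assumed header; not part of this hunk

// clone a trained model by round-tripping it through its coefficient matrix
alglib::logitmodel clonemodel(const alglib::logitmodel &lm)
{
    alglib::real_2d_array a;
    alglib::ae_int_t nvars, nclasses;
    alglib::mnlunpack(lm, a, nvars, nclasses);   // a holds the coefficients in the layout described above
    alglib::logitmodel lm2;
    alglib::mnlpack(a, nvars, nclasses, lm2);    // rebuild an equivalent model from the coefficients
    return lm2;
}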
-- ALGLIB -- - Copyright 04.09.2012 by Bochkanov Sergey + Copyright 10.09.2008 by Bochkanov Sergey *************************************************************************/ -void mlpallerrorssubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep) +void mnlpack(const real_2d_array &a, const ae_int_t nvars, const ae_int_t nclasses, logitmodel &lm, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpallerrorssubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mnlpack(const_cast(a.c_ptr()), nvars, nclasses, const_cast(lm.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +/************************************************************************* +Average cross-entropy (in bits per element) on the test set + +INPUT PARAMETERS: + LM - logit model + XY - test set + NPoints - test set size + +RESULT: + CrossEntropy/(NPoints*ln(2)). -void smp_mlpallerrorssubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep) + -- ALGLIB -- + Copyright 10.09.2008 by Bochkanov Sergey +*************************************************************************/ +double mnlavgce(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::_pexec_mlpallerrorssubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mnlavgce(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Calculation of all types of errors on subset of dataset. - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! 
First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - +Relative classification error on the test set INPUT PARAMETERS: - Network - network initialized with one of the network creation funcs - XY - original dataset given by sparse matrix; - one sample = one row; - first NIn columns contain inputs, - next NOut columns - desired outputs. - SetSize - real size of XY, SetSize>=0; - Subset - subset of SubsetSize elements, array[SubsetSize]; - SubsetSize- number of elements in Subset[] array: - * if SubsetSize>0, rows of XY with indices Subset[0]... - ...Subset[SubsetSize-1] are processed - * if SubsetSize=0, zeros are returned - * if SubsetSize<0, entire dataset is processed; Subset[] - array is ignored in this case. - -OUTPUT PARAMETERS: - Rep - it contains all type of errors. + LM - logit model + XY - test set + NPoints - test set size +RESULT: + percent of incorrectly classified cases. -- ALGLIB -- - Copyright 04.09.2012 by Bochkanov Sergey + Copyright 10.09.2008 by Bochkanov Sergey *************************************************************************/ -void mlpallerrorssparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep) +double mnlrelclserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mlpallerrorssparsesubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mnlrelclserror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +/************************************************************************* +RMS error on the test set + +INPUT PARAMETERS: + LM - logit model + XY - test set + NPoints - test set size + +RESULT: + root mean square error (error when estimating posterior probabilities). 
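Example (a sketch, not part of the upstream patch): the metric wrappers documented here evaluate a trained model on a hold-out set given in the same NVars+1-column layout used for training. The header name and the hold-out values are assumptions.

#include <cstdio>
#include "dataanalysis.h"   // assumed header; not part of this hunk

void report(const alglib::logitmodel &lm)
{
    // hold-out set: 2 samples, same column layout as the training set
    alglib::real_2d_array xytest = "[[0.15,0.25,0],[0.85,0.75,1]]";
    double avgce = alglib::mnlavgce(lm, xytest, 2);          // average cross-entropy, bits per element
    double rcls  = alglib::mnlrelclserror(lm, xytest, 2);    // relative classification error
    double rms   = alglib::mnlrmserror(lm, xytest, 2);       // RMS error of the posterior probabilities
    std::printf("avgce=%.3f  relcls=%.3f  rms=%.3f\n", avgce, rcls, rms);
}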
-void smp_mlpallerrorssparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize, modelerrors &rep) + -- ALGLIB -- + Copyright 30.08.2008 by Bochkanov Sergey +*************************************************************************/ +double mnlrmserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::_pexec_mlpallerrorssparsesubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mnlrmserror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* -Error of the neural network on subset of dataset. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - - -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format; - SetSize - real size of XY, SetSize>=0; - Subset - subset of SubsetSize elements, array[SubsetSize]; - SubsetSize- number of elements in Subset[] array: - * if SubsetSize>0, rows of XY with indices Subset[0]... - ...Subset[SubsetSize-1] are processed - * if SubsetSize=0, zeros are returned - * if SubsetSize<0, entire dataset is processed; Subset[] - array is ignored in this case. - -RESULT: - sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. 
- -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs - -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). - - -- ALGLIB -- - Copyright 04.09.2012 by Bochkanov Sergey -*************************************************************************/ -double mlperrorsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlperrorsubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - - -double smp_mlperrorsubset(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::_pexec_mlperrorsubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -Error of the neural network on subset of sparse dataset. - - -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support - ! - ! First improvement gives close-to-linear speedup on multicore systems. - ! Second improvement gives constant speedup (2-3x depending on your CPU) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. - - -INPUT PARAMETERS: - Network - neural network; - XY - training set, see below for information on the - training set format. This function checks correctness - of the dataset (no NANs/INFs, class numbers are - correct) and throws exception when incorrect dataset - is passed. Sparse matrix must use CRS format for - storage. 
- SetSize - real size of XY, SetSize>=0; - it is used when SubsetSize<0; - Subset - subset of SubsetSize elements, array[SubsetSize]; - SubsetSize- number of elements in Subset[] array: - * if SubsetSize>0, rows of XY with indices Subset[0]... - ...Subset[SubsetSize-1] are processed - * if SubsetSize=0, zeros are returned - * if SubsetSize<0, entire dataset is processed; Subset[] - array is ignored in this case. - -RESULT: - sum-of-squares error, SUM(sqr(y[i]-desired_y[i])/2) - -DATASET FORMAT: - -This function uses two different dataset formats - one for regression -networks, another one for classification networks. - -For regression networks with NIn inputs and NOut outputs following dataset -format is used: -* dataset is given by NPoints*(NIn+NOut) matrix -* each row corresponds to one example -* first NIn columns are inputs, next NOut columns are outputs - -For classification networks with NIn inputs and NClasses clases following -dataset format is used: -* dataset is given by NPoints*(NIn+1) matrix -* each row corresponds to one example -* first NIn columns are inputs, last column stores class number (from 0 to - NClasses-1). - - -- ALGLIB -- - Copyright 04.09.2012 by Bochkanov Sergey -*************************************************************************/ -double mlperrorsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlperrorsparsesubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - - -double smp_mlperrorsparsesubset(const multilayerperceptron &network, const sparsematrix &xy, const ae_int_t setsize, const integer_1d_array &subset, const ae_int_t subsetsize) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::_pexec_mlperrorsparsesubset(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), setsize, const_cast(subset.c_ptr()), subsetsize, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* - -*************************************************************************/ -_logitmodel_owner::_logitmodel_owner() -{ - p_struct = (alglib_impl::logitmodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::logitmodel), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_logitmodel_init(p_struct, NULL); -} - -_logitmodel_owner::_logitmodel_owner(const _logitmodel_owner &rhs) -{ - p_struct = (alglib_impl::logitmodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::logitmodel), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_logitmodel_init_copy(p_struct, const_cast(rhs.p_struct), NULL); -} - -_logitmodel_owner& _logitmodel_owner::operator=(const _logitmodel_owner &rhs) -{ - if( this==&rhs ) - return *this; - alglib_impl::_logitmodel_clear(p_struct); - alglib_impl::_logitmodel_init_copy(p_struct, 
const_cast(rhs.p_struct), NULL); - return *this; -} - -_logitmodel_owner::~_logitmodel_owner() -{ - alglib_impl::_logitmodel_clear(p_struct); - ae_free(p_struct); -} - -alglib_impl::logitmodel* _logitmodel_owner::c_ptr() -{ - return p_struct; -} - -alglib_impl::logitmodel* _logitmodel_owner::c_ptr() const -{ - return const_cast(p_struct); -} -logitmodel::logitmodel() : _logitmodel_owner() -{ -} - -logitmodel::logitmodel(const logitmodel &rhs):_logitmodel_owner(rhs) -{ -} - -logitmodel& logitmodel::operator=(const logitmodel &rhs) -{ - if( this==&rhs ) - return *this; - _logitmodel_owner::operator=(rhs); - return *this; -} - -logitmodel::~logitmodel() -{ -} - - -/************************************************************************* -MNLReport structure contains information about training process: -* NGrad - number of gradient calculations -* NHess - number of Hessian calculations -*************************************************************************/ -_mnlreport_owner::_mnlreport_owner() -{ - p_struct = (alglib_impl::mnlreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mnlreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mnlreport_init(p_struct, NULL); -} - -_mnlreport_owner::_mnlreport_owner(const _mnlreport_owner &rhs) -{ - p_struct = (alglib_impl::mnlreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mnlreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mnlreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); -} - -_mnlreport_owner& _mnlreport_owner::operator=(const _mnlreport_owner &rhs) -{ - if( this==&rhs ) - return *this; - alglib_impl::_mnlreport_clear(p_struct); - alglib_impl::_mnlreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); - return *this; -} - -_mnlreport_owner::~_mnlreport_owner() -{ - alglib_impl::_mnlreport_clear(p_struct); - ae_free(p_struct); -} - -alglib_impl::mnlreport* _mnlreport_owner::c_ptr() -{ - return p_struct; -} - -alglib_impl::mnlreport* _mnlreport_owner::c_ptr() const -{ - return const_cast(p_struct); -} -mnlreport::mnlreport() : _mnlreport_owner() ,ngrad(p_struct->ngrad),nhess(p_struct->nhess) -{ -} - -mnlreport::mnlreport(const mnlreport &rhs):_mnlreport_owner(rhs) ,ngrad(p_struct->ngrad),nhess(p_struct->nhess) -{ -} - -mnlreport& mnlreport::operator=(const mnlreport &rhs) -{ - if( this==&rhs ) - return *this; - _mnlreport_owner::operator=(rhs); - return *this; -} - -mnlreport::~mnlreport() -{ -} - -/************************************************************************* -This subroutine trains logit model. - -INPUT PARAMETERS: - XY - training set, array[0..NPoints-1,0..NVars] - First NVars columns store values of independent - variables, next column stores number of class (from 0 - to NClasses-1) which dataset element belongs to. Fractional - values are rounded to nearest integer. - NPoints - training set size, NPoints>=1 - NVars - number of independent variables, NVars>=1 - NClasses - number of classes, NClasses>=2 - -OUTPUT PARAMETERS: - Info - return code: - * -2, if there is a point with class number - outside of [0..NClasses-1]. 
- * -1, if incorrect parameters was passed - (NPoints(xy.c_ptr()), npoints, nvars, nclasses, &info, const_cast(lm.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -Procesing - -INPUT PARAMETERS: - LM - logit model, passed by non-constant reference - (some fields of structure are used as temporaries - when calculating model output). - X - input vector, array[0..NVars-1]. - Y - (possibly) preallocated buffer; if size of Y is less than - NClasses, it will be reallocated.If it is large enough, it - is NOT reallocated, so we can save some time on reallocation. - -OUTPUT PARAMETERS: - Y - result, array[0..NClasses-1] - Vector of posterior probabilities for classification task. - - -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey -*************************************************************************/ -void mnlprocess(const logitmodel &lm, const real_1d_array &x, real_1d_array &y) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mnlprocess(const_cast(lm.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -'interactive' variant of MNLProcess for languages like Python which -support constructs like "Y = MNLProcess(LM,X)" and interactive mode of the -interpreter - -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. - - -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey -*************************************************************************/ -void mnlprocessi(const logitmodel &lm, const real_1d_array &x, real_1d_array &y) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mnlprocessi(const_cast(lm.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -Unpacks coefficients of logit model. Logit model have form: - - P(class=i) = S(i) / (S(0) + S(1) + ... +S(M-1)) - S(i) = Exp(A[i,0]*X[0] + ... + A[i,N-1]*X[N-1] + A[i,N]), when i(lm.c_ptr()), const_cast(a.c_ptr()), &nvars, &nclasses, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -"Packs" coefficients and creates logit model in ALGLIB format (MNLUnpack -reversed). - -INPUT PARAMETERS: - A - model (see MNLUnpack) - NVars - number of independent variables - NClasses - number of classes - -OUTPUT PARAMETERS: - LM - logit model. 
- - -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey -*************************************************************************/ -void mnlpack(const real_2d_array &a, const ae_int_t nvars, const ae_int_t nclasses, logitmodel &lm) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mnlpack(const_cast(a.c_ptr()), nvars, nclasses, const_cast(lm.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -Average cross-entropy (in bits per element) on the test set - -INPUT PARAMETERS: - LM - logit model - XY - test set - NPoints - test set size - -RESULT: - CrossEntropy/(NPoints*ln(2)). - - -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey -*************************************************************************/ -double mnlavgce(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mnlavgce(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -Relative classification error on the test set - -INPUT PARAMETERS: - LM - logit model - XY - test set - NPoints - test set size - -RESULT: - percent of incorrectly classified cases. - - -- ALGLIB -- - Copyright 10.09.2008 by Bochkanov Sergey -*************************************************************************/ -double mnlrelclserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mnlrelclserror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -RMS error on the test set - -INPUT PARAMETERS: - LM - logit model - XY - test set - NPoints - test set size - -RESULT: - root mean square error (error when estimating posterior probabilities). 
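Note on the wrapper rewrite visible throughout this hunk: the 3.10-era try/catch blocks that rethrew alglib_impl::ae_error_type as ap_error are replaced by setjmp/longjmp via _break_jump together with the _ALGLIB_CPP_EXCEPTION and _ALGLIB_SET_ERROR_FLAG macros, so the same wrappers also build with AE_NO_EXCEPTIONS defined. A sketch of what this means for callers follows; it assumes an exception-enabled build and that filterlrma rejects K<1 with an assertion, as its documentation implies.

#include <cstdio>
#include "dataanalysis.h"   // assumed header; not part of this hunk

int main()
{
    try
    {
        alglib::real_1d_array x = "[1,2,3]";
        alglib::filterlrma(x, 0);                // K<1 violates the documented precondition
    }
    catch(alglib::ap_error &err)
    {
        // with the 3.16 wrappers the error arrives here via setjmp/longjmp and
        // _ALGLIB_CPP_EXCEPTION instead of a rethrown alglib_impl::ae_error_type,
        // so caller code written against the 3.10 wrappers keeps working unchanged
        std::printf("ALGLIB error: %s\n", err.msg.c_str());
    }
    return 0;
}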
- - -- ALGLIB -- - Copyright 30.08.2008 by Bochkanov Sergey -*************************************************************************/ -double mnlrmserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mnlrmserror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - -/************************************************************************* -Average error on the test set +Average error on the test set INPUT PARAMETERS: LM - logit model @@ -6529,20 +7520,26 @@ -- ALGLIB -- Copyright 30.08.2008 by Bochkanov Sergey *************************************************************************/ -double mnlavgerror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints) +double mnlavgerror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::mnlavgerror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mnlavgerror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* @@ -6559,20 +7556,26 @@ -- ALGLIB -- Copyright 30.08.2008 by Bochkanov Sergey *************************************************************************/ -double mnlavgrelerror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t ssize) +double mnlavgrelerror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t ssize, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mnlavgrelerror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), ssize, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mnlavgrelerror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), ssize, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return 
*(reinterpret_cast(&result)); } /************************************************************************* @@ -6581,22 +7584,30 @@ -- ALGLIB -- Copyright 10.09.2008 by Bochkanov Sergey *************************************************************************/ -ae_int_t mnlclserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints) +ae_int_t mnlclserror(const logitmodel &lm, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::ae_int_t result = alglib_impl::mnlclserror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::mnlclserror(const_cast(lm.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif +#if defined(AE_COMPILE_MCPD) || !defined(AE_PARTIAL_BUILD) /************************************************************************* This structure is a MCPD (Markov Chains for Population Data) solver. @@ -6607,33 +7618,97 @@ *************************************************************************/ _mcpdstate_owner::_mcpdstate_owner() { - p_struct = (alglib_impl::mcpdstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::mcpdstate), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mcpdstate_init(p_struct, NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mcpdstate_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::mcpdstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::mcpdstate), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mcpdstate)); + alglib_impl::_mcpdstate_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } _mcpdstate_owner::_mcpdstate_owner(const _mcpdstate_owner &rhs) { - p_struct = (alglib_impl::mcpdstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::mcpdstate), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mcpdstate_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mcpdstate_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, 
&_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mcpdstate copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::mcpdstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::mcpdstate), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mcpdstate)); + alglib_impl::_mcpdstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } _mcpdstate_owner& _mcpdstate_owner::operator=(const _mcpdstate_owner &rhs) { if( this==&rhs ) return *this; - alglib_impl::_mcpdstate_clear(p_struct); - alglib_impl::_mcpdstate_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: mcpdstate assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mcpdstate assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_mcpdstate_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::mcpdstate)); + alglib_impl::_mcpdstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); return *this; } _mcpdstate_owner::~_mcpdstate_owner() { - alglib_impl::_mcpdstate_clear(p_struct); - ae_free(p_struct); + if( p_struct!=NULL ) + { + alglib_impl::_mcpdstate_destroy(p_struct); + ae_free(p_struct); + } } alglib_impl::mcpdstate* _mcpdstate_owner::c_ptr() @@ -6683,33 +7758,97 @@ *************************************************************************/ _mcpdreport_owner::_mcpdreport_owner() { - p_struct = (alglib_impl::mcpdreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mcpdreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mcpdreport_init(p_struct, NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mcpdreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::mcpdreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mcpdreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mcpdreport)); + alglib_impl::_mcpdreport_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } _mcpdreport_owner::_mcpdreport_owner(const _mcpdreport_owner &rhs) { - p_struct = (alglib_impl::mcpdreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mcpdreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mcpdreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mcpdreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + 
_ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mcpdreport copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::mcpdreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mcpdreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mcpdreport)); + alglib_impl::_mcpdreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } _mcpdreport_owner& _mcpdreport_owner::operator=(const _mcpdreport_owner &rhs) { if( this==&rhs ) return *this; - alglib_impl::_mcpdreport_clear(p_struct); - alglib_impl::_mcpdreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: mcpdreport assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mcpdreport assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_mcpdreport_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::mcpdreport)); + alglib_impl::_mcpdreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); return *this; } _mcpdreport_owner::~_mcpdreport_owner() { - alglib_impl::_mcpdreport_clear(p_struct); - ae_free(p_struct); + if( p_struct!=NULL ) + { + alglib_impl::_mcpdreport_destroy(p_struct); + ae_free(p_struct); + } } alglib_impl::mcpdreport* _mcpdreport_owner::c_ptr() @@ -6797,20 +7936,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdcreate(const ae_int_t n, mcpdstate &s) +void mcpdcreate(const ae_int_t n, mcpdstate &s, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdcreate(n, const_cast(s.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdcreate(n, const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -6859,20 +8004,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdcreateentry(const ae_int_t n, const ae_int_t entrystate, mcpdstate &s) +void mcpdcreateentry(const ae_int_t n, const ae_int_t entrystate, mcpdstate &s, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( 
setjmp(_break_jump) ) { - alglib_impl::mcpdcreateentry(n, entrystate, const_cast(s.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdcreateentry(n, entrystate, const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -6921,20 +8072,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdcreateexit(const ae_int_t n, const ae_int_t exitstate, mcpdstate &s) +void mcpdcreateexit(const ae_int_t n, const ae_int_t exitstate, mcpdstate &s, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdcreateexit(n, exitstate, const_cast(s.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdcreateexit(n, exitstate, const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -6994,20 +8151,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdcreateentryexit(const ae_int_t n, const ae_int_t entrystate, const ae_int_t exitstate, mcpdstate &s) +void mcpdcreateentryexit(const ae_int_t n, const ae_int_t entrystate, const ae_int_t exitstate, mcpdstate &s, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdcreateentryexit(n, entrystate, exitstate, const_cast(s.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdcreateentryexit(n, entrystate, exitstate, const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7042,20 +8205,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey 
*************************************************************************/ -void mcpdaddtrack(const mcpdstate &s, const real_2d_array &xy, const ae_int_t k) +void mcpdaddtrack(const mcpdstate &s, const real_2d_array &xy, const ae_int_t k, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdaddtrack(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), k, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdaddtrack(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), k, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7090,25 +8259,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdaddtrack(const mcpdstate &s, const real_2d_array &xy) +#if !defined(AE_NO_EXCEPTIONS) +void mcpdaddtrack(const mcpdstate &s, const real_2d_array &xy, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t k; k = xy.rows(); alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mcpdaddtrack(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), k, &_alglib_env_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdaddtrack(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), k, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif /************************************************************************* This function is used to add equality constraints on the elements of the @@ -7166,20 +8336,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdsetec(const mcpdstate &s, const real_2d_array &ec) +void mcpdsetec(const mcpdstate &s, const real_2d_array &ec, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdsetec(const_cast(s.c_ptr()), const_cast(ec.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdsetec(const_cast(s.c_ptr()), 
const_cast(ec.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7234,20 +8410,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdaddec(const mcpdstate &s, const ae_int_t i, const ae_int_t j, const double c) +void mcpdaddec(const mcpdstate &s, const ae_int_t i, const ae_int_t j, const double c, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdaddec(const_cast(s.c_ptr()), i, j, c, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdaddec(const_cast(s.c_ptr()), i, j, c, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7298,20 +8480,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdsetbc(const mcpdstate &s, const real_2d_array &bndl, const real_2d_array &bndu) +void mcpdsetbc(const mcpdstate &s, const real_2d_array &bndl, const real_2d_array &bndu, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdsetbc(const_cast(s.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdsetbc(const_cast(s.c_ptr()), const_cast(bndl.c_ptr()), const_cast(bndu.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7362,20 +8550,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdaddbc(const mcpdstate &s, const ae_int_t i, const ae_int_t j, const double bndl, const double bndu) +void mcpdaddbc(const mcpdstate &s, const ae_int_t i, const ae_int_t j, const double bndl, const double bndu, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdaddbc(const_cast(s.c_ptr()), i, j, bndl, bndu, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else 
+ _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdaddbc(const_cast(s.c_ptr()), i, j, bndl, bndu, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7419,20 +8613,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k) +void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdsetlc(const_cast(s.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdsetlc(const_cast(s.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7476,26 +8676,27 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d_array &ct) +#if !defined(AE_NO_EXCEPTIONS) +void mcpdsetlc(const mcpdstate &s, const real_2d_array &c, const integer_1d_array &ct, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; ae_int_t k; if( (c.rows()!=ct.length())) - throw ap_error("Error while calling 'mcpdsetlc': looks like one of arguments has wrong size"); + _ALGLIB_CPP_EXCEPTION("Error while calling 'mcpdsetlc': looks like one of arguments has wrong size"); k = c.rows(); alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mcpdsetlc(const_cast(s.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdsetlc(const_cast(s.c_ptr()), const_cast(c.c_ptr()), const_cast(ct.c_ptr()), k, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif /************************************************************************* This function allows to tune amount of Tikhonov regularization being @@ -7517,20 +8718,26 @@ -- ALGLIB -- 
Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdsettikhonovregularizer(const mcpdstate &s, const double v) +void mcpdsettikhonovregularizer(const mcpdstate &s, const double v, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdsettikhonovregularizer(const_cast(s.c_ptr()), v, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdsettikhonovregularizer(const_cast(s.c_ptr()), v, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7555,20 +8762,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdsetprior(const mcpdstate &s, const real_2d_array &pp) +void mcpdsetprior(const mcpdstate &s, const real_2d_array &pp, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdsetprior(const_cast(s.c_ptr()), const_cast(pp.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdsetprior(const_cast(s.c_ptr()), const_cast(pp.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7596,20 +8809,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdsetpredictionweights(const mcpdstate &s, const real_1d_array &pw) +void mcpdsetpredictionweights(const mcpdstate &s, const real_1d_array &pw, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdsetpredictionweights(const_cast(s.c_ptr()), const_cast(pw.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdsetpredictionweights(const_cast(s.c_ptr()), 
const_cast(pw.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7621,20 +8840,26 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdsolve(const mcpdstate &s) +void mcpdsolve(const mcpdstate &s, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdsolve(const_cast(s.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdsolve(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7656,54 +8881,126 @@ -- ALGLIB -- Copyright 23.05.2010 by Bochkanov Sergey *************************************************************************/ -void mcpdresults(const mcpdstate &s, real_2d_array &p, mcpdreport &rep) +void mcpdresults(const mcpdstate &s, real_2d_array &p, mcpdreport &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mcpdresults(const_cast(s.c_ptr()), const_cast(p.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mcpdresults(const_cast(s.c_ptr()), const_cast(p.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif +#if defined(AE_COMPILE_MLPE) || !defined(AE_PARTIAL_BUILD) /************************************************************************* Neural networks ensemble *************************************************************************/ _mlpensemble_owner::_mlpensemble_owner() { - p_struct = (alglib_impl::mlpensemble*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpensemble), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mlpensemble_init(p_struct, NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mlpensemble_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = 
(alglib_impl::mlpensemble*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpensemble), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mlpensemble)); + alglib_impl::_mlpensemble_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } _mlpensemble_owner::_mlpensemble_owner(const _mlpensemble_owner &rhs) { - p_struct = (alglib_impl::mlpensemble*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpensemble), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mlpensemble_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mlpensemble_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mlpensemble copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::mlpensemble*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpensemble), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mlpensemble)); + alglib_impl::_mlpensemble_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } _mlpensemble_owner& _mlpensemble_owner::operator=(const _mlpensemble_owner &rhs) { if( this==&rhs ) return *this; - alglib_impl::_mlpensemble_clear(p_struct); - alglib_impl::_mlpensemble_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: mlpensemble assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mlpensemble assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_mlpensemble_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::mlpensemble)); + alglib_impl::_mlpensemble_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); return *this; } _mlpensemble_owner::~_mlpensemble_owner() { - alglib_impl::_mlpensemble_clear(p_struct); - ae_free(p_struct); + if( p_struct!=NULL ) + { + alglib_impl::_mlpensemble_destroy(p_struct); + ae_free(p_struct); + } } alglib_impl::mlpensemble* _mlpensemble_owner::c_ptr() @@ -7758,54 +9055,128 @@ *************************************************************************/ void mlpeserialize(mlpensemble &obj, std::string &s_out) { + jmp_buf _break_jump; alglib_impl::ae_state state; alglib_impl::ae_serializer serializer; alglib_impl::ae_int_t ssize; alglib_impl::ae_state_init(&state); - try + if( setjmp(_break_jump) ) { - alglib_impl::ae_serializer_init(&serializer); - alglib_impl::ae_serializer_alloc_start(&serializer); - alglib_impl::mlpealloc(&serializer, obj.c_ptr(), &state); - ssize = alglib_impl::ae_serializer_get_alloc_size(&serializer); - s_out.clear(); - s_out.reserve((size_t)(ssize+1)); - alglib_impl::ae_serializer_sstart_str(&serializer, &s_out); - 
alglib_impl::mlpeserialize(&serializer, obj.c_ptr(), &state); - alglib_impl::ae_serializer_stop(&serializer); - if( s_out.length()>(size_t)ssize ) - throw ap_error("ALGLIB: serialization integrity error"); - alglib_impl::ae_serializer_clear(&serializer); - alglib_impl::ae_state_clear(&state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif } - catch(alglib_impl::ae_error_type) + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_alloc_start(&serializer); + alglib_impl::mlpealloc(&serializer, obj.c_ptr(), &state); + ssize = alglib_impl::ae_serializer_get_alloc_size(&serializer); + s_out.clear(); + s_out.reserve((size_t)(ssize+1)); + alglib_impl::ae_serializer_sstart_str(&serializer, &s_out); + alglib_impl::mlpeserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_assert( s_out.length()<=(size_t)ssize, "ALGLIB: serialization integrity error", &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); +} +/************************************************************************* +This function unserializes data structure from string. +*************************************************************************/ +void mlpeunserialize(const std::string &s_in, mlpensemble &obj) +{ + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - throw ap_error(state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_ustart_str(&serializer, &s_in); + alglib_impl::mlpeunserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } + + /************************************************************************* -This function unserializes data structure from string. +This function serializes data structure to C++ stream. + +Data stream generated by this function is same as string representation +generated by string version of serializer - alphanumeric characters, +dots, underscores, minus signs, which are grouped into words separated by +spaces and CR+LF. + +We recommend you to read comments on string version of serializer to find +out more about serialization of AlGLIB objects. 
*************************************************************************/ -void mlpeunserialize(std::string &s_in, mlpensemble &obj) +void mlpeserialize(mlpensemble &obj, std::ostream &s_out) { + jmp_buf _break_jump; alglib_impl::ae_state state; alglib_impl::ae_serializer serializer; alglib_impl::ae_state_init(&state); - try + if( setjmp(_break_jump) ) { - alglib_impl::ae_serializer_init(&serializer); - alglib_impl::ae_serializer_ustart_str(&serializer, &s_in); - alglib_impl::mlpeunserialize(&serializer, obj.c_ptr(), &state); - alglib_impl::ae_serializer_stop(&serializer); - alglib_impl::ae_serializer_clear(&serializer); - alglib_impl::ae_state_clear(&state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif } - catch(alglib_impl::ae_error_type) + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_alloc_start(&serializer); + alglib_impl::mlpealloc(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_get_alloc_size(&serializer); // not actually needed, but we have to ask + alglib_impl::ae_serializer_sstart_stream(&serializer, &s_out); + alglib_impl::mlpeserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); +} +/************************************************************************* +This function unserializes data structure from stream. +*************************************************************************/ +void mlpeunserialize(const std::istream &s_in, mlpensemble &obj) +{ + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - throw ap_error(state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_ustart_stream(&serializer, &s_in); + alglib_impl::mlpeunserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } /************************************************************************* @@ -7814,20 +9185,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreate0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreate0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreate0(nin, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 
) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreate0(nin, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7836,20 +9213,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreate1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreate1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreate1(nin, nhid, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreate1(nin, nhid, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7858,20 +9241,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreate2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreate2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreate2(nin, nhid1, nhid2, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreate2(nin, nhid1, nhid2, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7880,20 +9269,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreateb0(const ae_int_t nin, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreateb0(const ae_int_t nin, const ae_int_t nout, const double b, const double 
d, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreateb0(nin, nout, b, d, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreateb0(nin, nout, b, d, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7902,20 +9297,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreateb1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreateb1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreateb1(nin, nhid, nout, b, d, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreateb1(nin, nhid, nout, b, d, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7924,20 +9325,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreateb2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreateb2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double b, const double d, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreateb2(nin, nhid1, nhid2, nout, b, d, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); 
return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreateb2(nin, nhid1, nhid2, nout, b, d, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7946,20 +9353,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreater0(const ae_int_t nin, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreater0(const ae_int_t nin, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreater0(nin, nout, a, b, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreater0(nin, nout, a, b, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7968,20 +9381,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreater1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreater1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreater1(nin, nhid, nout, a, b, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreater1(nin, nhid, nout, a, b, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -7990,20 +9409,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey 
*************************************************************************/ -void mlpecreater2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreater2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const double a, const double b, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreater2(nin, nhid1, nhid2, nout, a, b, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreater2(nin, nhid1, nhid2, nout, a, b, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8012,20 +9437,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreatec0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreatec0(const ae_int_t nin, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreatec0(nin, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreatec0(nin, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8034,20 +9465,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreatec1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreatec1(const ae_int_t nin, const ae_int_t nhid, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreatec1(nin, nhid, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - 
alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreatec1(nin, nhid, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8056,20 +9493,26 @@ -- ALGLIB -- Copyright 18.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreatec2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreatec2(const ae_int_t nin, const ae_int_t nhid1, const ae_int_t nhid2, const ae_int_t nout, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreatec2(nin, nhid1, nhid2, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreatec2(nin, nhid1, nhid2, nout, ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8078,20 +9521,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpecreatefromnetwork(const multilayerperceptron &network, const ae_int_t ensemblesize, mlpensemble &ensemble) +void mlpecreatefromnetwork(const multilayerperceptron &network, const ae_int_t ensemblesize, mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpecreatefromnetwork(const_cast(network.c_ptr()), ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpecreatefromnetwork(const_cast(network.c_ptr()), ensemblesize, const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } 
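/*************************************************************************
NOTE: the hunks above and below all apply the same ALGLIB 3.16 wrapper
pattern - the old try/catch blocks around alglib_impl calls are replaced
by setjmp()-based error handling plus a trailing "const xparams _xparams"
argument, and string serialization gains std::ostream/std::istream
overloads.  A minimal caller-side sketch follows; it assumes the public
header declares a default value for _xparams (e.g. alglib::xdefault) so
the extra parameter can be omitted at call sites.  The header name
"dataanalysis.h" and the function ensemble_roundtrip_sketch() are
illustrative assumptions, not part of this patch.

    #include "dataanalysis.h"   // assumed ALGLIB header for mlpensemble
    #include <sstream>

    void ensemble_roundtrip_sketch()
    {
        alglib::mlpensemble e;
        alglib::mlpecreate0(2, 1, 5, e);     // 2 inputs, 1 output, 5 networks

        alglib::real_1d_array x = "[0.1, 0.2]";
        alglib::real_1d_array y;
        alglib::mlpeprocessi(e, x, y);       // run the ensemble, y is allocated

        std::stringstream s;                 // 3.16 adds stream serialization
        alglib::mlpeserialize(e, s);
        alglib::mlpensemble e2;
        alglib::mlpeunserialize(s, e2);
    }
*************************************************************************/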
/************************************************************************* @@ -8100,20 +9549,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlperandomize(const mlpensemble &ensemble) +void mlperandomize(const mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlperandomize(const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlperandomize(const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8122,20 +9577,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpeproperties(const mlpensemble &ensemble, ae_int_t &nin, ae_int_t &nout) +void mlpeproperties(const mlpensemble &ensemble, ae_int_t &nin, ae_int_t &nout, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpeproperties(const_cast(ensemble.c_ptr()), &nin, &nout, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpeproperties(const_cast(ensemble.c_ptr()), &nin, &nout, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8144,20 +9605,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -bool mlpeissoftmax(const mlpensemble &ensemble) +bool mlpeissoftmax(const mlpensemble &ensemble, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - ae_bool result = alglib_impl::mlpeissoftmax(const_cast(ensemble.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + ae_bool 
result = alglib_impl::mlpeissoftmax(const_cast(ensemble.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* @@ -8178,20 +9645,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpeprocess(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y) +void mlpeprocess(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpeprocess(const_cast(ensemble.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpeprocess(const_cast(ensemble.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8206,20 +9679,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpeprocessi(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y) +void mlpeprocessi(const mlpensemble &ensemble, const real_1d_array &x, real_1d_array &y, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpeprocessi(const_cast(ensemble.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpeprocessi(const_cast(ensemble.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8238,20 +9717,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -double mlperelclserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints) +double mlperelclserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlperelclserror(const_cast(ensemble.c_ptr()), 
const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlperelclserror(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* @@ -8269,20 +9754,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -double mlpeavgce(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints) +double mlpeavgce(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - double result = alglib_impl::mlpeavgce(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpeavgce(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* @@ -8301,20 +9792,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -double mlpermserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints) +double mlpermserror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlpermserror(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpermserror(const_cast(ensemble.c_ptr()), 
const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* @@ -8332,20 +9829,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -double mlpeavgerror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints) +double mlpeavgerror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlpeavgerror(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpeavgerror(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* @@ -8363,22 +9866,30 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -double mlpeavgrelerror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints) +double mlpeavgrelerror(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try - { - double result = alglib_impl::mlpeavgrelerror(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::mlpeavgrelerror(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +#endif +#if defined(AE_COMPILE_MLPTRAIN) || !defined(AE_PARTIAL_BUILD) /************************************************************************* Training report: * RelCLSError - fraction of misclassified cases. 
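The error-metric wrappers in this hunk (mlperelclserror, mlpeavgce, mlpermserror, mlpeavgerror, mlpeavgrelerror) keep their old meaning and only change plumbing: each takes the dataset as a real_2d_array plus an explicit point count and returns a double. A minimal sketch of evaluating a regression ensemble on a hold-out set, assuming the usual ALGLIB row layout (input columns followed by target columns) and the defaulted xparams argument from the 3.16 headers:

    #include <cstdio>
    #include "dataanalysis.h"   // assumed header, as above

    // Print several generalization metrics for an already-trained ensemble.
    // xy holds one case per row: NIn input columns followed by NOut target columns.
    static void report_ensemble_errors(const alglib::mlpensemble &ens,
                                       const alglib::real_2d_array &xy,
                                       alglib::ae_int_t npoints)
    {
        double rms = alglib::mlpermserror(ens, xy, npoints);     // root-mean-square error
        double avg = alglib::mlpeavgerror(ens, xy, npoints);     // average absolute error
        double rel = alglib::mlpeavgrelerror(ens, xy, npoints);  // average relative error
        std::printf("rms=%.6f  avg=%.6f  avgrel=%.6f\n", rms, avg, rel);
    }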
@@ -8397,33 +9908,97 @@ *************************************************************************/ _mlpreport_owner::_mlpreport_owner() { - p_struct = (alglib_impl::mlpreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mlpreport_init(p_struct, NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mlpreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::mlpreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mlpreport)); + alglib_impl::_mlpreport_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } _mlpreport_owner::_mlpreport_owner(const _mlpreport_owner &rhs) { - p_struct = (alglib_impl::mlpreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mlpreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mlpreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mlpreport copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::mlpreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mlpreport)); + alglib_impl::_mlpreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } _mlpreport_owner& _mlpreport_owner::operator=(const _mlpreport_owner &rhs) { if( this==&rhs ) return *this; - alglib_impl::_mlpreport_clear(p_struct); - alglib_impl::_mlpreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: mlpreport assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mlpreport assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_mlpreport_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::mlpreport)); + alglib_impl::_mlpreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); return *this; } _mlpreport_owner::~_mlpreport_owner() { - alglib_impl::_mlpreport_clear(p_struct); - ae_free(p_struct); + if( p_struct!=NULL ) + { + alglib_impl::_mlpreport_destroy(p_struct); + 
ae_free(p_struct); + } } alglib_impl::mlpreport* _mlpreport_owner::c_ptr() @@ -8461,33 +10036,97 @@ *************************************************************************/ _mlpcvreport_owner::_mlpcvreport_owner() { - p_struct = (alglib_impl::mlpcvreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpcvreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mlpcvreport_init(p_struct, NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mlpcvreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::mlpcvreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpcvreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mlpcvreport)); + alglib_impl::_mlpcvreport_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } _mlpcvreport_owner::_mlpcvreport_owner(const _mlpcvreport_owner &rhs) { - p_struct = (alglib_impl::mlpcvreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpcvreport), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mlpcvreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mlpcvreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mlpcvreport copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::mlpcvreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlpcvreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mlpcvreport)); + alglib_impl::_mlpcvreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } _mlpcvreport_owner& _mlpcvreport_owner::operator=(const _mlpcvreport_owner &rhs) { if( this==&rhs ) return *this; - alglib_impl::_mlpcvreport_clear(p_struct); - alglib_impl::_mlpcvreport_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: mlpcvreport assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mlpcvreport assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_mlpcvreport_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::mlpcvreport)); + alglib_impl::_mlpcvreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); return *this; } _mlpcvreport_owner::~_mlpcvreport_owner() 
{ - alglib_impl::_mlpcvreport_clear(p_struct); - ae_free(p_struct); + if( p_struct!=NULL ) + { + alglib_impl::_mlpcvreport_destroy(p_struct); + ae_free(p_struct); + } } alglib_impl::mlpcvreport* _mlpcvreport_owner::c_ptr() @@ -8528,33 +10167,97 @@ *************************************************************************/ _mlptrainer_owner::_mlptrainer_owner() { - p_struct = (alglib_impl::mlptrainer*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlptrainer), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mlptrainer_init(p_struct, NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mlptrainer_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::mlptrainer*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlptrainer), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mlptrainer)); + alglib_impl::_mlptrainer_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } _mlptrainer_owner::_mlptrainer_owner(const _mlptrainer_owner &rhs) { - p_struct = (alglib_impl::mlptrainer*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlptrainer), NULL); - if( p_struct==NULL ) - throw ap_error("ALGLIB: malloc error"); - alglib_impl::_mlptrainer_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_mlptrainer_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mlptrainer copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::mlptrainer*)alglib_impl::ae_malloc(sizeof(alglib_impl::mlptrainer), &_state); + memset(p_struct, 0, sizeof(alglib_impl::mlptrainer)); + alglib_impl::_mlptrainer_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } _mlptrainer_owner& _mlptrainer_owner::operator=(const _mlptrainer_owner &rhs) { if( this==&rhs ) return *this; - alglib_impl::_mlptrainer_clear(p_struct); - alglib_impl::_mlptrainer_init_copy(p_struct, const_cast(rhs.p_struct), NULL); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: mlptrainer assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: mlptrainer assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_mlptrainer_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::mlptrainer)); + alglib_impl::_mlptrainer_init_copy(p_struct, 
const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); return *this; } _mlptrainer_owner::~_mlptrainer_owner() { - alglib_impl::_mlptrainer_clear(p_struct); - ae_free(p_struct); + if( p_struct!=NULL ) + { + alglib_impl::_mlptrainer_destroy(p_struct); + ae_free(p_struct); + } } alglib_impl::mlptrainer* _mlptrainer_owner::c_ptr() @@ -8617,20 +10320,26 @@ -- ALGLIB -- Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ -void mlptrainlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep) +void mlptrainlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlptrainlm(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, &info, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlptrainlm(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, &info, const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8670,20 +10379,26 @@ -- ALGLIB -- Copyright 09.12.2007 by Bochkanov Sergey *************************************************************************/ -void mlptrainlbfgs(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, ae_int_t &info, mlpreport &rep) +void mlptrainlbfgs(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, ae_int_t &info, mlpreport &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlptrainlbfgs(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, wstep, maxits, &info, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlptrainlbfgs(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, wstep, maxits, &info, const_cast(rep.c_ptr()), &_alglib_env_state); + 
alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8736,20 +10451,26 @@ -- ALGLIB -- Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ -void mlptraines(const multilayerperceptron &network, const real_2d_array &trnxy, const ae_int_t trnsize, const real_2d_array &valxy, const ae_int_t valsize, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep) +void mlptraines(const multilayerperceptron &network, const real_2d_array &trnxy, const ae_int_t trnsize, const real_2d_array &valxy, const ae_int_t valsize, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlptraines(const_cast(network.c_ptr()), const_cast(trnxy.c_ptr()), trnsize, const_cast(valxy.c_ptr()), valsize, decay, restarts, &info, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlptraines(const_cast(network.c_ptr()), const_cast(trnxy.c_ptr()), trnsize, const_cast(valxy.c_ptr()), valsize, decay, restarts, &info, const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8781,20 +10502,26 @@ -- ALGLIB -- Copyright 09.12.2007 by Bochkanov Sergey *************************************************************************/ -void mlpkfoldcvlbfgs(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, const ae_int_t foldscount, ae_int_t &info, mlpreport &rep, mlpcvreport &cvrep) +void mlpkfoldcvlbfgs(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, const ae_int_t foldscount, ae_int_t &info, mlpreport &rep, mlpcvreport &cvrep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpkfoldcvlbfgs(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, wstep, maxits, foldscount, &info, const_cast(rep.c_ptr()), const_cast(cvrep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpkfoldcvlbfgs(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), 
npoints, decay, restarts, wstep, maxits, foldscount, &info, const_cast(rep.c_ptr()), const_cast(cvrep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8824,56 +10551,42 @@ -- ALGLIB -- Copyright 09.12.2007 by Bochkanov Sergey *************************************************************************/ -void mlpkfoldcvlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const ae_int_t foldscount, ae_int_t &info, mlpreport &rep, mlpcvreport &cvrep) +void mlpkfoldcvlm(const multilayerperceptron &network, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const ae_int_t foldscount, ae_int_t &info, mlpreport &rep, mlpcvreport &cvrep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpkfoldcvlm(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, foldscount, &info, const_cast(rep.c_ptr()), const_cast(cvrep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpkfoldcvlm(const_cast(network.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, foldscount, &info, const_cast(rep.c_ptr()), const_cast(cvrep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* This function estimates generalization error using cross-validation on the current dataset with current training settings. -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support (C++ computational core) - ! - ! Second improvement gives constant speedup (2-3X). First improvement - ! gives close-to-linear speedup on multicore systems. Following - ! operations can be executed in parallel: - ! * FoldsCount cross-validation rounds (always) - ! * NRestarts training sessions performed within each of - ! cross-validation rounds (if NRestarts>1) - ! * gradient calculation over large dataset (if dataset is large enough) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) + ! COMMERCIAL EDITION OF ALGLIB: ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! - ! This note is given for users of commercial edition; if you use GPL - ! 
edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: S - trainer object @@ -8920,37 +10633,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlpkfoldcv(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, const ae_int_t foldscount, mlpreport &rep) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mlpkfoldcv(const_cast(s.c_ptr()), const_cast(network.c_ptr()), nrestarts, foldscount, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - - -void smp_mlpkfoldcv(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, const ae_int_t foldscount, mlpreport &rep) +void mlpkfoldcv(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, const ae_int_t foldscount, mlpreport &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::_pexec_mlpkfoldcv(const_cast(s.c_ptr()), const_cast(network.c_ptr()), nrestarts, foldscount, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpkfoldcv(const_cast(s.c_ptr()), const_cast(network.c_ptr()), nrestarts, foldscount, const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8968,20 +10670,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlpcreatetrainer(const ae_int_t nin, const ae_int_t nout, mlptrainer &s) +void mlpcreatetrainer(const ae_int_t nin, const ae_int_t nout, mlptrainer &s, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreatetrainer(nin, nout, const_cast(s.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( 
_xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreatetrainer(nin, nout, const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -8999,20 +10707,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlpcreatetrainercls(const ae_int_t nin, const ae_int_t nclasses, mlptrainer &s) +void mlpcreatetrainercls(const ae_int_t nin, const ae_int_t nclasses, mlptrainer &s, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpcreatetrainercls(nin, nclasses, const_cast(s.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpcreatetrainercls(nin, nclasses, const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9049,20 +10763,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlpsetdataset(const mlptrainer &s, const real_2d_array &xy, const ae_int_t npoints) +void mlpsetdataset(const mlptrainer &s, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpsetdataset(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpsetdataset(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9100,20 +10820,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlpsetsparsedataset(const mlptrainer &s, const sparsematrix &xy, const ae_int_t npoints) +void mlpsetsparsedataset(const mlptrainer &s, const sparsematrix &xy, const ae_int_t npoints, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpsetsparsedataset(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); - 
alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpsetsparsedataset(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9132,20 +10858,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlpsetdecay(const mlptrainer &s, const double decay) +void mlpsetdecay(const mlptrainer &s, const double decay, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpsetdecay(const_cast(s.c_ptr()), decay, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpsetdecay(const_cast(s.c_ptr()), decay, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9174,20 +10906,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlpsetcond(const mlptrainer &s, const double wstep, const ae_int_t maxits) +void mlpsetcond(const mlptrainer &s, const double wstep, const ae_int_t maxits, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpsetcond(const_cast(s.c_ptr()), wstep, maxits, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpsetcond(const_cast(s.c_ptr()), wstep, maxits, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9207,20 +10945,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlpsetalgobatch(const mlptrainer &s) +void mlpsetalgobatch(const mlptrainer &s, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( 
setjmp(_break_jump) ) { - alglib_impl::mlpsetalgobatch(const_cast(s.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpsetalgobatch(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9231,35 +10975,16 @@ Training is performed using current training algorithm. -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support (C++ computational core) + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Second improvement gives constant speedup (2-3X). First improvement - ! gives close-to-linear speedup on multicore systems. Following - ! operations can be executed in parallel: - ! * NRestarts training sessions performed within each of - ! cross-validation rounds (if NRestarts>1) - ! * gradient calculation over large dataset (if dataset is large enough) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. 
INPUT PARAMETERS: S - trainer object @@ -9285,37 +11010,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlptrainnetwork(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, mlpreport &rep) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - alglib_impl::mlptrainnetwork(const_cast(s.c_ptr()), const_cast(network.c_ptr()), nrestarts, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - - -void smp_mlptrainnetwork(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, mlpreport &rep) +void mlptrainnetwork(const mlptrainer &s, const multilayerperceptron &network, const ae_int_t nrestarts, mlpreport &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::_pexec_mlptrainnetwork(const_cast(s.c_ptr()), const_cast(network.c_ptr()), nrestarts, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlptrainnetwork(const_cast(s.c_ptr()), const_cast(network.c_ptr()), nrestarts, const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9370,20 +11084,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -void mlpstarttraining(const mlptrainer &s, const multilayerperceptron &network, const bool randomstart) +void mlpstarttraining(const mlptrainer &s, const multilayerperceptron &network, const bool randomstart, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpstarttraining(const_cast(s.c_ptr()), const_cast(network.c_ptr()), randomstart, &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpstarttraining(const_cast(s.c_ptr()), const_cast(network.c_ptr()), randomstart, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9391,33 +11111,16 @@ not recommend you to use it unless you are pretty sure that you need ability to monitor training progress. 
-FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support (C++ computational core) + ! COMMERCIAL EDITION OF ALGLIB: ! - ! Second improvement gives constant speedup (2-3X). First improvement - ! gives close-to-linear speedup on multicore systems. Following - ! operations can be executed in parallel: - ! * gradient calculation over large dataset (if dataset is large enough) + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core - ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. - ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! called 'SMP support', before using parallel version of this function. + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. This function performs step-by-step training of the neural network. Here "step-by-step" means that training starts with MLPStartTraining() call, @@ -9477,37 +11180,26 @@ -- ALGLIB -- Copyright 23.07.2012 by Bochkanov Sergey *************************************************************************/ -bool mlpcontinuetraining(const mlptrainer &s, const multilayerperceptron &network) -{ - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try - { - ae_bool result = alglib_impl::mlpcontinuetraining(const_cast(s.c_ptr()), const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } -} - - -bool smp_mlpcontinuetraining(const mlptrainer &s, const multilayerperceptron &network) +bool mlpcontinuetraining(const mlptrainer &s, const multilayerperceptron &network, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - ae_bool result = alglib_impl::_pexec_mlpcontinuetraining(const_cast(s.c_ptr()), const_cast(network.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return *(reinterpret_cast(&result)); - } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + ae_bool result = alglib_impl::mlpcontinuetraining(const_cast(s.c_ptr()), const_cast(network.c_ptr()), 
&_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } /************************************************************************* @@ -9535,20 +11227,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpebagginglm(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep, mlpcvreport &ooberrors) +void mlpebagginglm(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep, mlpcvreport &ooberrors, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpebagginglm(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, &info, const_cast(rep.c_ptr()), const_cast(ooberrors.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpebagginglm(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, &info, const_cast(rep.c_ptr()), const_cast(ooberrors.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9579,20 +11277,26 @@ -- ALGLIB -- Copyright 17.02.2009 by Bochkanov Sergey *************************************************************************/ -void mlpebagginglbfgs(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, ae_int_t &info, mlpreport &rep, mlpcvreport &ooberrors) +void mlpebagginglbfgs(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, const double wstep, const ae_int_t maxits, ae_int_t &info, mlpreport &rep, mlpcvreport &ooberrors, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpebagginglbfgs(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, wstep, maxits, &info, const_cast(rep.c_ptr()), const_cast(ooberrors.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpebagginglbfgs(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, wstep, maxits, &info, const_cast(rep.c_ptr()), 
const_cast(ooberrors.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9619,20 +11323,26 @@ -- ALGLIB -- Copyright 10.03.2009 by Bochkanov Sergey *************************************************************************/ -void mlpetraines(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep) +void mlpetraines(const mlpensemble &ensemble, const real_2d_array &xy, const ae_int_t npoints, const double decay, const ae_int_t restarts, ae_int_t &info, mlpreport &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlpetraines(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, &info, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlpetraines(const_cast(ensemble.c_ptr()), const_cast(xy.c_ptr()), npoints, decay, restarts, &info, const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } /************************************************************************* @@ -9641,37 +11351,16 @@ round performs NRestarts random restarts (thus, EnsembleSize*NRestarts training rounds is performed in total). -FOR USERS OF COMMERCIAL EDITION: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (C++ and C# computational cores) - ! * SSE support (C++ computational core) - ! - ! Second improvement gives constant speedup (2-3X). First improvement - ! gives close-to-linear speedup on multicore systems. Following - ! operations can be executed in parallel: - ! * EnsembleSize training sessions performed for each of ensemble - ! members (always parallelized) - ! * NRestarts training sessions performed within each of training - ! sessions (if NRestarts>1) - ! * gradient calculation over large dataset (if dataset is large enough) - ! - ! In order to use multicore features you have to: - ! * use commercial version of ALGLIB - ! * call this function with "smp_" prefix, which indicates that - ! multicore code will be used (for multicore support) - ! - ! In order to use SSE features you have to: - ! * use commercial version of ALGLIB on Intel processors - ! * use C++ computational core + ! COMMERCIAL EDITION OF ALGLIB: ! - ! This note is given for users of commercial edition; if you use GPL - ! edition, you still will be able to call smp-version of this function, - ! but all computations will be done serially. + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! - ! We recommend you to carefully read ALGLIB Reference Manual, section - ! 
called 'SMP support', before using parallel version of this function. + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: S - trainer object; @@ -9700,10368 +11389,11163 @@ -- ALGLIB -- Copyright 22.08.2012 by Bochkanov Sergey *************************************************************************/ -void mlptrainensemblees(const mlptrainer &s, const mlpensemble &ensemble, const ae_int_t nrestarts, mlpreport &rep) +void mlptrainensemblees(const mlptrainer &s, const mlpensemble &ensemble, const ae_int_t nrestarts, mlpreport &rep, const xparams _xparams) { + jmp_buf _break_jump; alglib_impl::ae_state _alglib_env_state; alglib_impl::ae_state_init(&_alglib_env_state); - try + if( setjmp(_break_jump) ) { - alglib_impl::mlptrainensemblees(const_cast(s.c_ptr()), const_cast(ensemble.c_ptr()), nrestarts, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) - { - throw ap_error(_alglib_env_state.error_msg); - } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::mlptrainensemblees(const_cast(s.c_ptr()), const_cast(ensemble.c_ptr()), nrestarts, const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif + +#if defined(AE_COMPILE_CLUSTERING) || !defined(AE_PARTIAL_BUILD) +/************************************************************************* +This structure is a clusterization engine. +You should not try to access its fields directly. +Use ALGLIB functions in order to work with this object. 
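For orientation, the intended lifecycle of this engine object (create it, attach a dataset, optionally tune parameters, then run one of the clustering functions) can be sketched as follows. This is an illustrative fragment only, assuming the standard ALGLIB C++ interface declared in dataanalysis.h; it is not part of the packaged sources.

    #include "dataanalysis.h"
    using namespace alglib;

    void clusterizer_lifecycle_sketch()
    {
        // 1. create an empty clusterization engine
        clusterizerstate s;
        clusterizercreate(s);

        // 2. attach a small 2-D dataset, Euclidean distance (disttype=2)
        real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";
        clusterizersetpoints(s, xy, 2);

        // 3. additional parameters (AHC algorithm, k-means limits, ...) would be set here

        // 4. run agglomerative hierarchical clustering and inspect the report
        ahcreport rep;
        clusterizerrunahc(s, rep);
        // rep.z / rep.p / rep.pm now describe the merge tree (see AHCReport notes)
    }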
-void smp_mlptrainensemblees(const mlptrainer &s, const mlpensemble &ensemble, const ae_int_t nrestarts, mlpreport &rep) + -- ALGLIB -- + Copyright 10.07.2012 by Bochkanov Sergey +*************************************************************************/ +_clusterizerstate_owner::_clusterizerstate_owner() { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::_pexec_mlptrainensemblees(const_cast(s.c_ptr()), const_cast(ensemble.c_ptr()), nrestarts, const_cast(rep.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); + if( p_struct!=NULL ) + { + alglib_impl::_clusterizerstate_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::clusterizerstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::clusterizerstate), &_state); + memset(p_struct, 0, sizeof(alglib_impl::clusterizerstate)); + alglib_impl::_clusterizerstate_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_clusterizerstate_owner::_clusterizerstate_owner(const _clusterizerstate_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - throw ap_error(_alglib_env_state.error_msg); + if( p_struct!=NULL ) + { + alglib_impl::_clusterizerstate_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: clusterizerstate copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::clusterizerstate*)alglib_impl::ae_malloc(sizeof(alglib_impl::clusterizerstate), &_state); + memset(p_struct, 0, sizeof(alglib_impl::clusterizerstate)); + alglib_impl::_clusterizerstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } -/************************************************************************* -Principal components analysis - -Subroutine builds orthogonal basis where first axis corresponds to -direction with maximum variance, second axis maximizes variance in subspace -orthogonal to first axis and so on. - -It should be noted that, unlike LDA, PCA does not use class labels. - -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes one important improvement of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! - ! Intel MKL gives approximately constant (with respect to number of - ! worker threads) acceleration factor which depends on CPU being used, - ! problem size and "baseline" ALGLIB edition which is used for - ! comparison. Best results are achieved for high-dimensional problems - ! (NVars is at least 256). - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! 
related features provided by commercial edition of ALGLIB. - -INPUT PARAMETERS: - X - dataset, array[0..NPoints-1,0..NVars-1]. - matrix contains ONLY INDEPENDENT VARIABLES. - NPoints - dataset size, NPoints>=0 - NVars - number of independent variables, NVars>=1 - -OUTPUT PARAMETERS: - Info - return code: - * -4, if SVD subroutine haven't converged - * -1, if wrong parameters has been passed (NPoints<0, - NVars<1) - * 1, if task is solved - S2 - array[0..NVars-1]. variance values corresponding - to basis vectors. - V - array[0..NVars-1,0..NVars-1] - matrix, whose columns store basis vectors. - - -- ALGLIB -- - Copyright 25.08.2008 by Bochkanov Sergey -*************************************************************************/ -void pcabuildbasis(const real_2d_array &x, const ae_int_t npoints, const ae_int_t nvars, ae_int_t &info, real_1d_array &s2, real_2d_array &v) +_clusterizerstate_owner& _clusterizerstate_owner::operator=(const _clusterizerstate_owner &rhs) { - alglib_impl::ae_state _alglib_env_state; - alglib_impl::ae_state_init(&_alglib_env_state); - try + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - alglib_impl::pcabuildbasis(const_cast(x.c_ptr()), npoints, nvars, &info, const_cast(s2.c_ptr()), const_cast(v.c_ptr()), &_alglib_env_state); - alglib_impl::ae_state_clear(&_alglib_env_state); - return; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - catch(alglib_impl::ae_error_type) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: clusterizerstate assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: clusterizerstate assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_clusterizerstate_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::clusterizerstate)); + alglib_impl::_clusterizerstate_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + +_clusterizerstate_owner::~_clusterizerstate_owner() +{ + if( p_struct!=NULL ) { - throw ap_error(_alglib_env_state.error_msg); + alglib_impl::_clusterizerstate_destroy(p_struct); + ae_free(p_struct); } } + +alglib_impl::clusterizerstate* _clusterizerstate_owner::c_ptr() +{ + return p_struct; } -///////////////////////////////////////////////////////////////////////// -// -// THIS SECTION CONTAINS IMPLEMENTATION OF COMPUTATIONAL CORE -// -///////////////////////////////////////////////////////////////////////// -namespace alglib_impl +alglib_impl::clusterizerstate* _clusterizerstate_owner::c_ptr() const { -static double bdss_xlny(double x, double y, ae_state *_state); -static double bdss_getcv(/* Integer */ ae_vector* cnt, - ae_int_t nc, - ae_state *_state); -static void bdss_tieaddc(/* Integer */ ae_vector* c, - /* Integer */ ae_vector* ties, - ae_int_t ntie, - ae_int_t nc, - /* Integer */ ae_vector* cnt, - ae_state *_state); -static void bdss_tiesubc(/* Integer */ ae_vector* c, - /* Integer */ ae_vector* ties, - ae_int_t ntie, - ae_int_t nc, - /* Integer */ ae_vector* cnt, - ae_state *_state); + return const_cast(p_struct); +} +clusterizerstate::clusterizerstate() : _clusterizerstate_owner() +{ +} +clusterizerstate::clusterizerstate(const clusterizerstate &rhs):_clusterizerstate_owner(rhs) +{ 
+} -static double clustering_parallelcomplexity = 200000; -static ae_int_t clustering_kmeansblocksize = 32; -static ae_int_t clustering_kmeansparalleldim = 8; -static ae_int_t clustering_kmeansparallelk = 8; -static void clustering_selectinitialcenters(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t initalgo, - ae_int_t k, - /* Real */ ae_matrix* ct, - apbuffers* initbuf, - ae_shared_pool* updatepool, - ae_state *_state); -static ae_bool clustering_fixcenters(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - /* Real */ ae_matrix* ct, - ae_int_t k, - apbuffers* initbuf, - ae_shared_pool* updatepool, - ae_state *_state); -static void clustering_clusterizerrunahcinternal(clusterizerstate* s, - /* Real */ ae_matrix* d, - ahcreport* rep, - ae_state *_state); -static void clustering_evaluatedistancematrixrec(/* Real */ ae_matrix* xy, - ae_int_t nfeatures, - ae_int_t disttype, - /* Real */ ae_matrix* d, - ae_int_t i0, - ae_int_t i1, - ae_int_t j0, - ae_int_t j1, - ae_state *_state); +clusterizerstate& clusterizerstate::operator=(const clusterizerstate &rhs) +{ + if( this==&rhs ) + return *this; + _clusterizerstate_owner::operator=(rhs); + return *this; +} +clusterizerstate::~clusterizerstate() +{ +} +/************************************************************************* +This structure is used to store results of the agglomerative hierarchical +clustering (AHC). -static ae_int_t dforest_innernodewidth = 3; -static ae_int_t dforest_leafnodewidth = 2; -static ae_int_t dforest_dfusestrongsplits = 1; -static ae_int_t dforest_dfuseevs = 2; -static ae_int_t dforest_dffirstversion = 0; -static ae_int_t dforest_dfclserror(decisionforest* df, - /* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_state *_state); -static void dforest_dfprocessinternal(decisionforest* df, - ae_int_t offs, - /* Real */ ae_vector* x, - /* Real */ ae_vector* y, - ae_state *_state); -static void dforest_dfbuildtree(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, - ae_int_t nfeatures, - ae_int_t nvarsinpool, - ae_int_t flags, - dfinternalbuffers* bufs, - hqrndstate* rs, - ae_state *_state); -static void dforest_dfbuildtreerec(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, - ae_int_t nfeatures, - ae_int_t nvarsinpool, - ae_int_t flags, - ae_int_t* numprocessed, - ae_int_t idx1, - ae_int_t idx2, - dfinternalbuffers* bufs, - hqrndstate* rs, - ae_state *_state); -static void dforest_dfsplitc(/* Real */ ae_vector* x, - /* Integer */ ae_vector* c, - /* Integer */ ae_vector* cntbuf, - ae_int_t n, - ae_int_t nc, - ae_int_t flags, - ae_int_t* info, - double* threshold, - double* e, - /* Real */ ae_vector* sortrbuf, - /* Integer */ ae_vector* sortibuf, - ae_state *_state); -static void dforest_dfsplitr(/* Real */ ae_vector* x, - /* Real */ ae_vector* y, - ae_int_t n, - ae_int_t flags, - ae_int_t* info, - double* threshold, - double* e, - /* Real */ ae_vector* sortrbuf, - /* Real */ ae_vector* sortrbuf2, - ae_state *_state); +Following information is returned: +* TerminationType - completion code: + * 1 for successful completion of algorithm + * -5 inappropriate combination of clustering algorithm and distance + function was used. As for now, it is possible only when Ward's + method is called for dataset with non-Euclidean distance function. + In case negative completion code is returned, other fields of report + structure are invalid and should not be used. 
-static ae_int_t linreg_lrvnum = 5; -static void linreg_lrinternal(/* Real */ ae_matrix* xy, - /* Real */ ae_vector* s, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t* info, - linearmodel* lm, - lrreport* ar, - ae_state *_state); +* NPoints contains number of points in the original dataset +* Z contains information about merges performed (see below). Z contains + indexes from the original (unsorted) dataset and it can be used when you + need to know what points were merged. However, it is not convenient when + you want to build a dendrograd (see below). +* if you want to build dendrogram, you can use Z, but it is not good + option, because Z contains indexes from unsorted dataset. Dendrogram + built from such dataset is likely to have intersections. So, you have to + reorder you points before building dendrogram. + Permutation which reorders point is returned in P. Another representation + of merges, which is more convenient for dendorgram construction, is + returned in PM. +* more information on format of Z, P and PM can be found below and in the + examples from ALGLIB Reference Manual. +FORMAL DESCRIPTION OF FIELDS: + NPoints number of points + Z array[NPoints-1,2], contains indexes of clusters + linked in pairs to form clustering tree. I-th row + corresponds to I-th merge: + * Z[I,0] - index of the first cluster to merge + * Z[I,1] - index of the second cluster to merge + * Z[I,0](rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} +_ahcreport_owner& _ahcreport_owner::operator=(const _ahcreport_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: ahcreport assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: ahcreport assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_ahcreport_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::ahcreport)); + alglib_impl::_ahcreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} -static double mlptrain_mindecay = 0.001; -static ae_int_t mlptrain_defaultlbfgsfactor = 6; -static void mlptrain_mlpkfoldcvgeneral(multilayerperceptron* n, - /* Real */ ae_matrix* xy, - ae_int_t npoints, - double decay, - ae_int_t restarts, - ae_int_t foldscount, - ae_bool lmalgorithm, - double wstep, - ae_int_t maxits, - ae_int_t* info, - mlpreport* rep, - mlpcvreport* cvrep, - ae_state *_state); -static void mlptrain_mlpkfoldsplit(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nclasses, - ae_int_t foldscount, - ae_bool stratifiedsplits, - /* Integer */ ae_vector* folds, - ae_state *_state); -static void mlptrain_mthreadcv(mlptrainer* s, - ae_int_t rowsize, - ae_int_t nrestarts, - /* Integer */ ae_vector* folds, - ae_int_t fold, - ae_int_t dfold, - /* Real */ ae_matrix* cvy, - ae_shared_pool* pooldatacv, - ae_state *_state); -static void mlptrain_mlptrainnetworkx(mlptrainer* s, - ae_int_t nrestarts, - ae_int_t algokind, - /* Integer */ ae_vector* trnsubset, - ae_int_t trnsubsetsize, - /* Integer */ ae_vector* valsubset, - ae_int_t valsubsetsize, - multilayerperceptron* network, - 
mlpreport* rep, - ae_bool isrootcall, - ae_shared_pool* sessions, - ae_state *_state); -static void mlptrain_mlptrainensemblex(mlptrainer* s, - mlpensemble* ensemble, - ae_int_t idx0, - ae_int_t idx1, - ae_int_t nrestarts, - ae_int_t trainingmethod, - sinteger* ngrad, - ae_bool isrootcall, - ae_shared_pool* esessions, - ae_state *_state); -static void mlptrain_mlpstarttrainingx(mlptrainer* s, - ae_bool randomstart, - ae_int_t algokind, - /* Integer */ ae_vector* subset, - ae_int_t subsetsize, - smlptrnsession* session, - ae_state *_state); -static ae_bool mlptrain_mlpcontinuetrainingx(mlptrainer* s, - /* Integer */ ae_vector* subset, - ae_int_t subsetsize, - ae_int_t* ngradbatch, - smlptrnsession* session, - ae_state *_state); -static void mlptrain_mlpebagginginternal(mlpensemble* ensemble, - /* Real */ ae_matrix* xy, - ae_int_t npoints, - double decay, - ae_int_t restarts, - double wstep, - ae_int_t maxits, - ae_bool lmalgorithm, - ae_int_t* info, - mlpreport* rep, - mlpcvreport* ooberrors, - ae_state *_state); -static void mlptrain_initmlptrnsession(multilayerperceptron* networktrained, - ae_bool randomizenetwork, - mlptrainer* trainer, - smlptrnsession* session, - ae_state *_state); -static void mlptrain_initmlptrnsessions(multilayerperceptron* networktrained, - ae_bool randomizenetwork, - mlptrainer* trainer, - ae_shared_pool* sessions, - ae_state *_state); -static void mlptrain_initmlpetrnsession(multilayerperceptron* individualnetwork, - mlptrainer* trainer, - mlpetrnsession* session, - ae_state *_state); -static void mlptrain_initmlpetrnsessions(multilayerperceptron* individualnetwork, - mlptrainer* trainer, - ae_shared_pool* sessions, - ae_state *_state); +_ahcreport_owner::~_ahcreport_owner() +{ + if( p_struct!=NULL ) + { + alglib_impl::_ahcreport_destroy(p_struct); + ae_free(p_struct); + } +} +alglib_impl::ahcreport* _ahcreport_owner::c_ptr() +{ + return p_struct; +} +alglib_impl::ahcreport* _ahcreport_owner::c_ptr() const +{ + return const_cast(p_struct); +} +ahcreport::ahcreport() : _ahcreport_owner() ,terminationtype(p_struct->terminationtype),npoints(p_struct->npoints),p(&p_struct->p),z(&p_struct->z),pz(&p_struct->pz),pm(&p_struct->pm),mergedist(&p_struct->mergedist) +{ +} +ahcreport::ahcreport(const ahcreport &rhs):_ahcreport_owner(rhs) ,terminationtype(p_struct->terminationtype),npoints(p_struct->npoints),p(&p_struct->p),z(&p_struct->z),pz(&p_struct->pz),pm(&p_struct->pm),mergedist(&p_struct->mergedist) +{ +} +ahcreport& ahcreport::operator=(const ahcreport &rhs) +{ + if( this==&rhs ) + return *this; + _ahcreport_owner::operator=(rhs); + return *this; +} +ahcreport::~ahcreport() +{ +} /************************************************************************* -This set of routines (DSErrAllocate, DSErrAccumulate, DSErrFinish) -calculates different error functions (classification error, cross-entropy, -rms, avg, avg.rel errors). +This structure is used to store results of the k-means clustering +algorithm. -1. DSErrAllocate prepares buffer. -2. DSErrAccumulate accumulates individual errors: - * Y contains predicted output (posterior probabilities for classification) - * DesiredY contains desired output (class number for classification) -3. DSErrFinish outputs results: - * Buf[0] contains relative classification error (zero for regression tasks) - * Buf[1] contains avg. 
cross-entropy (zero for regression tasks) - * Buf[2] contains rms error (regression, classification) - * Buf[3] contains average error (regression, classification) - * Buf[4] contains average relative error (regression, classification) - -NOTES(1): - "NClasses>0" means that we have classification task. - "NClasses<0" means regression task with -NClasses real outputs. +Following information is always returned: +* NPoints contains number of points in the original dataset +* TerminationType contains completion code, negative on failure, positive + on success +* K contains number of clusters -NOTES(2): - rms. avg, avg.rel errors for classification tasks are interpreted as - errors in posterior probabilities with respect to probabilities given - by training/test set. +For positive TerminationType we return: +* NFeatures contains number of variables in the original dataset +* C, which contains centers found by algorithm +* CIdx, which maps points of the original dataset to clusters + +FORMAL DESCRIPTION OF FIELDS: + NPoints number of points, >=0 + NFeatures number of variables, >=1 + TerminationType completion code: + * -5 if distance type is anything different from + Euclidean metric + * -3 for degenerate dataset: a) less than K distinct + points, b) K=0 for non-empty dataset. + * +1 for successful completion + K number of clusters + C array[K,NFeatures], rows of the array store centers + CIdx array[NPoints], which contains cluster indexes + IterationsCount actual number of iterations performed by clusterizer. + If algorithm performed more than one random restart, + total number of iterations is returned. + Energy merit function, "energy", sum of squared deviations + from cluster centers -- ALGLIB -- - Copyright 11.01.2009 by Bochkanov Sergey + Copyright 27.11.2012 by Bochkanov Sergey *************************************************************************/ -void dserrallocate(ae_int_t nclasses, - /* Real */ ae_vector* buf, - ae_state *_state) +_kmeansreport_owner::_kmeansreport_owner() { - - ae_vector_clear(buf); - - ae_vector_set_length(buf, 7+1, _state); - buf->ptr.p_double[0] = (double)(0); - buf->ptr.p_double[1] = (double)(0); - buf->ptr.p_double[2] = (double)(0); - buf->ptr.p_double[3] = (double)(0); - buf->ptr.p_double[4] = (double)(0); - buf->ptr.p_double[5] = (double)(nclasses); - buf->ptr.p_double[6] = (double)(0); - buf->ptr.p_double[7] = (double)(0); + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_kmeansreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::kmeansreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::kmeansreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::kmeansreport)); + alglib_impl::_kmeansreport_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } - -/************************************************************************* -See DSErrAllocate for comments on this routine. 
- - -- ALGLIB -- - Copyright 11.01.2009 by Bochkanov Sergey -*************************************************************************/ -void dserraccumulate(/* Real */ ae_vector* buf, - /* Real */ ae_vector* y, - /* Real */ ae_vector* desiredy, - ae_state *_state) +_kmeansreport_owner::_kmeansreport_owner(const _kmeansreport_owner &rhs) { - ae_int_t nclasses; - ae_int_t nout; - ae_int_t offs; - ae_int_t mmax; - ae_int_t rmax; - ae_int_t j; - double v; - double ev; - - - offs = 5; - nclasses = ae_round(buf->ptr.p_double[offs], _state); - if( nclasses>0 ) + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - - /* - * Classification - */ - rmax = ae_round(desiredy->ptr.p_double[0], _state); - mmax = 0; - for(j=1; j<=nclasses-1; j++) - { - if( ae_fp_greater(y->ptr.p_double[j],y->ptr.p_double[mmax]) ) - { - mmax = j; - } - } - if( mmax!=rmax ) - { - buf->ptr.p_double[0] = buf->ptr.p_double[0]+1; - } - if( ae_fp_greater(y->ptr.p_double[rmax],(double)(0)) ) - { - buf->ptr.p_double[1] = buf->ptr.p_double[1]-ae_log(y->ptr.p_double[rmax], _state); - } - else - { - buf->ptr.p_double[1] = buf->ptr.p_double[1]+ae_log(ae_maxrealnumber, _state); - } - for(j=0; j<=nclasses-1; j++) + if( p_struct!=NULL ) { - v = y->ptr.p_double[j]; - if( j==rmax ) - { - ev = (double)(1); - } - else - { - ev = (double)(0); - } - buf->ptr.p_double[2] = buf->ptr.p_double[2]+ae_sqr(v-ev, _state); - buf->ptr.p_double[3] = buf->ptr.p_double[3]+ae_fabs(v-ev, _state); - if( ae_fp_neq(ev,(double)(0)) ) - { - buf->ptr.p_double[4] = buf->ptr.p_double[4]+ae_fabs((v-ev)/ev, _state); - buf->ptr.p_double[offs+2] = buf->ptr.p_double[offs+2]+1; - } - } - buf->ptr.p_double[offs+1] = buf->ptr.p_double[offs+1]+1; + alglib_impl::_kmeansreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } - else + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: kmeansreport copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::kmeansreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::kmeansreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::kmeansreport)); + alglib_impl::_kmeansreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} + +_kmeansreport_owner& _kmeansreport_owner::operator=(const _kmeansreport_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - - /* - * Regression - */ - nout = -nclasses; - rmax = 0; - for(j=1; j<=nout-1; j++) - { - if( ae_fp_greater(desiredy->ptr.p_double[j],desiredy->ptr.p_double[rmax]) ) - { - rmax = j; - } - } - mmax = 0; - for(j=1; j<=nout-1; j++) - { - if( ae_fp_greater(y->ptr.p_double[j],y->ptr.p_double[mmax]) ) - { - mmax = j; - } - } - if( mmax!=rmax ) - { - buf->ptr.p_double[0] = buf->ptr.p_double[0]+1; - } - for(j=0; j<=nout-1; j++) - { - v = y->ptr.p_double[j]; - ev = desiredy->ptr.p_double[j]; - buf->ptr.p_double[2] = buf->ptr.p_double[2]+ae_sqr(v-ev, _state); - buf->ptr.p_double[3] = buf->ptr.p_double[3]+ae_fabs(v-ev, _state); - if( ae_fp_neq(ev,(double)(0)) ) - { - buf->ptr.p_double[4] = buf->ptr.p_double[4]+ae_fabs((v-ev)/ev, _state); - buf->ptr.p_double[offs+2] = 
buf->ptr.p_double[offs+2]+1; - } - } - buf->ptr.p_double[offs+1] = buf->ptr.p_double[offs+1]+1; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: kmeansreport assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: kmeansreport assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_kmeansreport_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::kmeansreport)); + alglib_impl::_kmeansreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; } +_kmeansreport_owner::~_kmeansreport_owner() +{ + if( p_struct!=NULL ) + { + alglib_impl::_kmeansreport_destroy(p_struct); + ae_free(p_struct); + } +} -/************************************************************************* -See DSErrAllocate for comments on this routine. +alglib_impl::kmeansreport* _kmeansreport_owner::c_ptr() +{ + return p_struct; +} - -- ALGLIB -- - Copyright 11.01.2009 by Bochkanov Sergey -*************************************************************************/ -void dserrfinish(/* Real */ ae_vector* buf, ae_state *_state) +alglib_impl::kmeansreport* _kmeansreport_owner::c_ptr() const { - ae_int_t nout; - ae_int_t offs; + return const_cast(p_struct); +} +kmeansreport::kmeansreport() : _kmeansreport_owner() ,npoints(p_struct->npoints),nfeatures(p_struct->nfeatures),terminationtype(p_struct->terminationtype),iterationscount(p_struct->iterationscount),energy(p_struct->energy),k(p_struct->k),c(&p_struct->c),cidx(&p_struct->cidx) +{ +} +kmeansreport::kmeansreport(const kmeansreport &rhs):_kmeansreport_owner(rhs) ,npoints(p_struct->npoints),nfeatures(p_struct->nfeatures),terminationtype(p_struct->terminationtype),iterationscount(p_struct->iterationscount),energy(p_struct->energy),k(p_struct->k),c(&p_struct->c),cidx(&p_struct->cidx) +{ +} - offs = 5; - nout = ae_iabs(ae_round(buf->ptr.p_double[offs], _state), _state); - if( ae_fp_neq(buf->ptr.p_double[offs+1],(double)(0)) ) - { - buf->ptr.p_double[0] = buf->ptr.p_double[0]/buf->ptr.p_double[offs+1]; - buf->ptr.p_double[1] = buf->ptr.p_double[1]/buf->ptr.p_double[offs+1]; - buf->ptr.p_double[2] = ae_sqrt(buf->ptr.p_double[2]/(nout*buf->ptr.p_double[offs+1]), _state); - buf->ptr.p_double[3] = buf->ptr.p_double[3]/(nout*buf->ptr.p_double[offs+1]); - } - if( ae_fp_neq(buf->ptr.p_double[offs+2],(double)(0)) ) - { - buf->ptr.p_double[4] = buf->ptr.p_double[4]/buf->ptr.p_double[offs+2]; - } +kmeansreport& kmeansreport::operator=(const kmeansreport &rhs) +{ + if( this==&rhs ) + return *this; + _kmeansreport_owner::operator=(rhs); + return *this; } +kmeansreport::~kmeansreport() +{ +} /************************************************************************* +This function initializes clusterizer object. Newly initialized object is +empty, i.e. it does not contain dataset. You should use it as follows: +1. creation +2. dataset is added with ClusterizerSetPoints() +3. additional parameters are set +3. 
clusterization is performed with one of the clustering functions -- ALGLIB -- - Copyright 19.05.2008 by Bochkanov Sergey + Copyright 10.07.2012 by Bochkanov Sergey *************************************************************************/ -void dsnormalize(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t* info, - /* Real */ ae_vector* means, - /* Real */ ae_vector* sigmas, - ae_state *_state) +void clusterizercreate(clusterizerstate &s, const xparams _xparams) { - ae_frame _frame_block; - ae_int_t i; - ae_int_t j; - ae_vector tmp; - double mean; - double variance; - double skewness; - double kurtosis; - - ae_frame_make(_state, &_frame_block); - *info = 0; - ae_vector_clear(means); - ae_vector_clear(sigmas); - ae_vector_init(&tmp, 0, DT_REAL, _state); - - - /* - * Test parameters - */ - if( npoints<=0||nvars<1 ) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - *info = -1; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - *info = 1; - - /* - * Standartization - */ - ae_vector_set_length(means, nvars-1+1, _state); - ae_vector_set_length(sigmas, nvars-1+1, _state); - ae_vector_set_length(&tmp, npoints-1+1, _state); - for(j=0; j<=nvars-1; j++) - { - ae_v_move(&tmp.ptr.p_double[0], 1, &xy->ptr.pp_double[0][j], xy->stride, ae_v_len(0,npoints-1)); - samplemoments(&tmp, npoints, &mean, &variance, &skewness, &kurtosis, _state); - means->ptr.p_double[j] = mean; - sigmas->ptr.p_double[j] = ae_sqrt(variance, _state); - if( ae_fp_eq(sigmas->ptr.p_double[j],(double)(0)) ) - { - sigmas->ptr.p_double[j] = (double)(1); - } - for(i=0; i<=npoints-1; i++) - { - xy->ptr.pp_double[i][j] = (xy->ptr.pp_double[i][j]-means->ptr.p_double[j])/sigmas->ptr.p_double[j]; - } - } - ae_frame_leave(_state); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizercreate(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* +This function adds dataset to the clusterizer structure. + +This function overrides all previous calls of ClusterizerSetPoints() or +ClusterizerSetDistances(). 
+ +INPUT PARAMETERS: + S - clusterizer state, initialized by ClusterizerCreate() + XY - array[NPoints,NFeatures], dataset + NPoints - number of points, >=0 + NFeatures- number of features, >=1 + DistType- distance function: + * 0 Chebyshev distance (L-inf norm) + * 1 city block distance (L1 norm) + * 2 Euclidean distance (L2 norm), non-squared + * 10 Pearson correlation: + dist(a,b) = 1-corr(a,b) + * 11 Absolute Pearson correlation: + dist(a,b) = 1-|corr(a,b)| + * 12 Uncentered Pearson correlation (cosine of the angle): + dist(a,b) = a'*b/(|a|*|b|) + * 13 Absolute uncentered Pearson correlation + dist(a,b) = |a'*b|/(|a|*|b|) + * 20 Spearman rank correlation: + dist(a,b) = 1-rankcorr(a,b) + * 21 Absolute Spearman rank correlation + dist(a,b) = 1-|rankcorr(a,b)| + +NOTE 1: different distance functions have different performance penalty: + * Euclidean or Pearson correlation distances are the fastest ones + * Spearman correlation distance function is a bit slower + * city block and Chebyshev distances are order of magnitude slower + + The reason behing difference in performance is that correlation-based + distance functions are computed using optimized linear algebra kernels, + while Chebyshev and city block distance functions are computed using + simple nested loops with two branches at each iteration. + +NOTE 2: different clustering algorithms have different limitations: + * agglomerative hierarchical clustering algorithms may be used with + any kind of distance metric + * k-means++ clustering algorithm may be used only with Euclidean + distance function + Thus, list of specific clustering algorithms you may use depends + on distance function you specify when you set your dataset. -- ALGLIB -- - Copyright 19.05.2008 by Bochkanov Sergey + Copyright 10.07.2012 by Bochkanov Sergey *************************************************************************/ -void dsnormalizec(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t* info, - /* Real */ ae_vector* means, - /* Real */ ae_vector* sigmas, - ae_state *_state) +void clusterizersetpoints(const clusterizerstate &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, const xparams _xparams) { - ae_frame _frame_block; - ae_int_t j; - ae_vector tmp; - double mean; - double variance; - double skewness; - double kurtosis; - - ae_frame_make(_state, &_frame_block); - *info = 0; - ae_vector_clear(means); - ae_vector_clear(sigmas); - ae_vector_init(&tmp, 0, DT_REAL, _state); - - - /* - * Test parameters - */ - if( npoints<=0||nvars<1 ) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - *info = -1; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - *info = 1; - - /* - * Standartization - */ - ae_vector_set_length(means, nvars-1+1, _state); - ae_vector_set_length(sigmas, nvars-1+1, _state); - ae_vector_set_length(&tmp, npoints-1+1, _state); - for(j=0; j<=nvars-1; j++) - { - ae_v_move(&tmp.ptr.p_double[0], 1, &xy->ptr.pp_double[0][j], xy->stride, ae_v_len(0,npoints-1)); - samplemoments(&tmp, npoints, &mean, &variance, &skewness, &kurtosis, _state); - means->ptr.p_double[j] = mean; - sigmas->ptr.p_double[j] = ae_sqrt(variance, _state); - if( ae_fp_eq(sigmas->ptr.p_double[j],(double)(0)) ) - { - sigmas->ptr.p_double[j] = (double)(1); - } - } - 
ae_frame_leave(_state); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizersetpoints(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, nfeatures, disttype, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* +This function adds dataset to the clusterizer structure. + +This function overrides all previous calls of ClusterizerSetPoints() or +ClusterizerSetDistances(). + +INPUT PARAMETERS: + S - clusterizer state, initialized by ClusterizerCreate() + XY - array[NPoints,NFeatures], dataset + NPoints - number of points, >=0 + NFeatures- number of features, >=1 + DistType- distance function: + * 0 Chebyshev distance (L-inf norm) + * 1 city block distance (L1 norm) + * 2 Euclidean distance (L2 norm), non-squared + * 10 Pearson correlation: + dist(a,b) = 1-corr(a,b) + * 11 Absolute Pearson correlation: + dist(a,b) = 1-|corr(a,b)| + * 12 Uncentered Pearson correlation (cosine of the angle): + dist(a,b) = a'*b/(|a|*|b|) + * 13 Absolute uncentered Pearson correlation + dist(a,b) = |a'*b|/(|a|*|b|) + * 20 Spearman rank correlation: + dist(a,b) = 1-rankcorr(a,b) + * 21 Absolute Spearman rank correlation + dist(a,b) = 1-|rankcorr(a,b)| + +NOTE 1: different distance functions have different performance penalty: + * Euclidean or Pearson correlation distances are the fastest ones + * Spearman correlation distance function is a bit slower + * city block and Chebyshev distances are order of magnitude slower + + The reason behing difference in performance is that correlation-based + distance functions are computed using optimized linear algebra kernels, + while Chebyshev and city block distance functions are computed using + simple nested loops with two branches at each iteration. + +NOTE 2: different clustering algorithms have different limitations: + * agglomerative hierarchical clustering algorithms may be used with + any kind of distance metric + * k-means++ clustering algorithm may be used only with Euclidean + distance function + Thus, list of specific clustering algorithms you may use depends + on distance function you specify when you set your dataset. 
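As a concrete illustration of the parameters described above (sketch only, assuming the ALGLIB C++ wrappers from dataanalysis.h), attaching a 5x2 dataset with the Euclidean metric could look like this:

    // NPoints=5, NFeatures=2, DistType=2 (Euclidean; usable with both AHC and k-means)
    alglib::clusterizerstate s;
    alglib::clusterizercreate(s);
    alglib::real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";
    alglib::clusterizersetpoints(s, xy, 5, 2, 2);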
-- ALGLIB -- - Copyright 19.05.2008 by Bochkanov Sergey + Copyright 10.07.2012 by Bochkanov Sergey *************************************************************************/ -double dsgetmeanmindistance(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - ae_state *_state) +#if !defined(AE_NO_EXCEPTIONS) +void clusterizersetpoints(const clusterizerstate &s, const real_2d_array &xy, const ae_int_t disttype, const xparams _xparams) { - ae_frame _frame_block; - ae_int_t i; - ae_int_t j; - ae_vector tmp; - ae_vector tmp2; - double v; - double result; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + ae_int_t npoints; + ae_int_t nfeatures; - ae_frame_make(_state, &_frame_block); - ae_vector_init(&tmp, 0, DT_REAL, _state); - ae_vector_init(&tmp2, 0, DT_REAL, _state); + npoints = xy.rows(); + nfeatures = xy.cols(); + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizersetpoints(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, nfeatures, disttype, &_alglib_env_state); - - /* - * Test parameters - */ - if( npoints<=0||nvars<1 ) - { - result = (double)(0); - ae_frame_leave(_state); - return result; - } - - /* - * Process - */ - ae_vector_set_length(&tmp, npoints-1+1, _state); - for(i=0; i<=npoints-1; i++) - { - tmp.ptr.p_double[i] = ae_maxrealnumber; - } - ae_vector_set_length(&tmp2, nvars-1+1, _state); - for(i=0; i<=npoints-1; i++) - { - for(j=i+1; j<=npoints-1; j++) - { - ae_v_move(&tmp2.ptr.p_double[0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,nvars-1)); - ae_v_sub(&tmp2.ptr.p_double[0], 1, &xy->ptr.pp_double[j][0], 1, ae_v_len(0,nvars-1)); - v = ae_v_dotproduct(&tmp2.ptr.p_double[0], 1, &tmp2.ptr.p_double[0], 1, ae_v_len(0,nvars-1)); - v = ae_sqrt(v, _state); - tmp.ptr.p_double[i] = ae_minreal(tmp.ptr.p_double[i], v, _state); - tmp.ptr.p_double[j] = ae_minreal(tmp.ptr.p_double[j], v, _state); - } - } - result = (double)(0); - for(i=0; i<=npoints-1; i++) - { - result = result+tmp.ptr.p_double[i]/npoints; - } - ae_frame_leave(_state); - return result; + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - +#endif /************************************************************************* +This function adds dataset given by distance matrix to the clusterizer +structure. It is important that dataset is not given explicitly - only +distance matrix is given. + +This function overrides all previous calls of ClusterizerSetPoints() or +ClusterizerSetDistances(). + +INPUT PARAMETERS: + S - clusterizer state, initialized by ClusterizerCreate() + D - array[NPoints,NPoints], distance matrix given by its upper + or lower triangle (main diagonal is ignored because its + entries are expected to be zero). + NPoints - number of points + IsUpper - whether upper or lower triangle of D is given. + +NOTE 1: different clustering algorithms have different limitations: + * agglomerative hierarchical clustering algorithms may be used with + any kind of distance metric, including one which is given by + distance matrix + * k-means++ clustering algorithm may be used only with Euclidean + distance function and explicitly given points - it can not be + used with dataset given by distance matrix + Thus, if you call this function, you will be unable to use k-means + clustering algorithm to process your problem. 
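A minimal sketch of feeding a precomputed distance matrix instead of raw points, assuming the wrappers shown above; variable names are illustrative. Only AHC can be run on such a dataset (see NOTE 1).

    // 3x3 distance matrix; only the upper triangle is read when IsUpper=true
    alglib::clusterizerstate s;
    alglib::clusterizercreate(s);
    alglib::real_2d_array d = "[[0,1,3],[0,0,2],[0,0,0]]";
    alglib::clusterizersetdistances(s, d, 3, true);
    alglib::ahcreport rep;
    alglib::clusterizerrunahc(s, rep);   // k-means cannot be used with a distance-matrix dataset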
-- ALGLIB -- - Copyright 19.05.2008 by Bochkanov Sergey + Copyright 10.07.2012 by Bochkanov Sergey *************************************************************************/ -void dstie(/* Real */ ae_vector* a, - ae_int_t n, - /* Integer */ ae_vector* ties, - ae_int_t* tiecount, - /* Integer */ ae_vector* p1, - /* Integer */ ae_vector* p2, - ae_state *_state) +void clusterizersetdistances(const clusterizerstate &s, const real_2d_array &d, const ae_int_t npoints, const bool isupper, const xparams _xparams) { - ae_frame _frame_block; - ae_int_t i; - ae_int_t k; - ae_vector tmp; - - ae_frame_make(_state, &_frame_block); - ae_vector_clear(ties); - *tiecount = 0; - ae_vector_clear(p1); - ae_vector_clear(p2); - ae_vector_init(&tmp, 0, DT_INT, _state); - - - /* - * Special case - */ - if( n<=0 ) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - *tiecount = 0; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * Sort A - */ - tagsort(a, n, p1, p2, _state); - - /* - * Process ties - */ - *tiecount = 1; - for(i=1; i<=n-1; i++) - { - if( ae_fp_neq(a->ptr.p_double[i],a->ptr.p_double[i-1]) ) - { - *tiecount = *tiecount+1; - } - } - ae_vector_set_length(ties, *tiecount+1, _state); - ties->ptr.p_int[0] = 0; - k = 1; - for(i=1; i<=n-1; i++) - { - if( ae_fp_neq(a->ptr.p_double[i],a->ptr.p_double[i-1]) ) - { - ties->ptr.p_int[k] = i; - k = k+1; - } - } - ties->ptr.p_int[*tiecount] = n; - ae_frame_leave(_state); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizersetdistances(const_cast(s.c_ptr()), const_cast(d.c_ptr()), npoints, isupper, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* +This function adds dataset given by distance matrix to the clusterizer +structure. It is important that dataset is not given explicitly - only +distance matrix is given. + +This function overrides all previous calls of ClusterizerSetPoints() or +ClusterizerSetDistances(). + +INPUT PARAMETERS: + S - clusterizer state, initialized by ClusterizerCreate() + D - array[NPoints,NPoints], distance matrix given by its upper + or lower triangle (main diagonal is ignored because its + entries are expected to be zero). + NPoints - number of points + IsUpper - whether upper or lower triangle of D is given. + +NOTE 1: different clustering algorithms have different limitations: + * agglomerative hierarchical clustering algorithms may be used with + any kind of distance metric, including one which is given by + distance matrix + * k-means++ clustering algorithm may be used only with Euclidean + distance function and explicitly given points - it can not be + used with dataset given by distance matrix + Thus, if you call this function, you will be unable to use k-means + clustering algorithm to process your problem. 
-- ALGLIB -- - Copyright 11.12.2008 by Bochkanov Sergey + Copyright 10.07.2012 by Bochkanov Sergey *************************************************************************/ -void dstiefasti(/* Real */ ae_vector* a, - /* Integer */ ae_vector* b, - ae_int_t n, - /* Integer */ ae_vector* ties, - ae_int_t* tiecount, - /* Real */ ae_vector* bufr, - /* Integer */ ae_vector* bufi, - ae_state *_state) +#if !defined(AE_NO_EXCEPTIONS) +void clusterizersetdistances(const clusterizerstate &s, const real_2d_array &d, const bool isupper, const xparams _xparams) { - ae_frame _frame_block; - ae_int_t i; - ae_int_t k; - ae_vector tmp; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + ae_int_t npoints; + if( (d.rows()!=d.cols())) + _ALGLIB_CPP_EXCEPTION("Error while calling 'clusterizersetdistances': looks like one of arguments has wrong size"); + npoints = d.rows(); + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizersetdistances(const_cast(s.c_ptr()), const_cast(d.c_ptr()), npoints, isupper, &_alglib_env_state); - ae_frame_make(_state, &_frame_block); - *tiecount = 0; - ae_vector_init(&tmp, 0, DT_INT, _state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} +#endif - - /* - * Special case - */ - if( n<=0 ) +/************************************************************************* +This function sets agglomerative hierarchical clustering algorithm + +INPUT PARAMETERS: + S - clusterizer state, initialized by ClusterizerCreate() + Algo - algorithm type: + * 0 complete linkage (default algorithm) + * 1 single linkage + * 2 unweighted average linkage + * 3 weighted average linkage + * 4 Ward's method + +NOTE: Ward's method works correctly only with Euclidean distance, that's + why algorithm will return negative termination code (failure) for + any other distance type. + + It is possible, however, to use this method with user-supplied + distance matrix. It is your responsibility to pass one which was + calculated with Euclidean distance function. 
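For example (illustrative fragment, assuming a clusterizer state s whose dataset was attached with Euclidean distance, as Ward's method requires):

    alglib::clusterizersetahcalgo(s, 4);   // 4 = Ward's method; 0 would select complete linkage
    alglib::ahcreport rep;
    alglib::clusterizerrunahc(s, rep);
    // rep.terminationtype is +1 on success, -5 if the distance type is incompatible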
+ + -- ALGLIB -- + Copyright 10.07.2012 by Bochkanov Sergey +*************************************************************************/ +void clusterizersetahcalgo(const clusterizerstate &s, const ae_int_t algo, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - *tiecount = 0; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * Sort A - */ - tagsortfasti(a, b, bufr, bufi, n, _state); - - /* - * Process ties - */ - ties->ptr.p_int[0] = 0; - k = 1; - for(i=1; i<=n-1; i++) - { - if( ae_fp_neq(a->ptr.p_double[i],a->ptr.p_double[i-1]) ) - { - ties->ptr.p_int[k] = i; - k = k+1; - } - } - ties->ptr.p_int[k] = n; - *tiecount = k; - ae_frame_leave(_state); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizersetahcalgo(const_cast(s.c_ptr()), algo, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -Optimal binary classification - -Algorithms finds optimal (=with minimal cross-entropy) binary partition. -Internal subroutine. +This function sets k-means properties: number of restarts and maximum +number of iterations per one run. INPUT PARAMETERS: - A - array[0..N-1], variable - C - array[0..N-1], class numbers (0 or 1). - N - array size - -OUTPUT PARAMETERS: - Info - completetion code: - * -3, all values of A[] are same (partition is impossible) - * -2, one of C[] is incorrect (<0, >1) - * -1, incorrect pararemets were passed (N<=0). - * 1, OK - Threshold- partiton boundary. Left part contains values which are - strictly less than Threshold. Right part contains values - which are greater than or equal to Threshold. - PAL, PBL- probabilities P(0|v=Threshold) and P(1|v>=Threshold) - CVE - cross-validation estimate of cross-entropy + S - clusterizer state, initialized by ClusterizerCreate() + Restarts- restarts count, >=1. + k-means++ algorithm performs several restarts and chooses + best set of centers (one with minimum squared distance). + MaxIts - maximum number of k-means iterations performed during one + run. >=0, zero value means that algorithm performs unlimited + number of iterations. 
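Illustrative call (sketch only, assuming a configured clusterizer state s): five random restarts with an unlimited iteration budget per run. The limits must be set before the k-means clustering function is invoked.

    alglib::clusterizersetkmeanslimits(s, 5, 0);   // Restarts=5, MaxIts=0 (unlimited)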
-- ALGLIB -- - Copyright 22.05.2008 by Bochkanov Sergey + Copyright 10.07.2012 by Bochkanov Sergey *************************************************************************/ -void dsoptimalsplit2(/* Real */ ae_vector* a, - /* Integer */ ae_vector* c, - ae_int_t n, - ae_int_t* info, - double* threshold, - double* pal, - double* pbl, - double* par, - double* pbr, - double* cve, - ae_state *_state) +void clusterizersetkmeanslimits(const clusterizerstate &s, const ae_int_t restarts, const ae_int_t maxits, const xparams _xparams) { - ae_frame _frame_block; - ae_vector _a; - ae_vector _c; - ae_int_t i; - ae_int_t t; - double s; - ae_vector ties; - ae_int_t tiecount; - ae_vector p1; - ae_vector p2; - ae_int_t k; - ae_int_t koptimal; - double pak; - double pbk; - double cvoptimal; - double cv; - - ae_frame_make(_state, &_frame_block); - ae_vector_init_copy(&_a, a, _state); - a = &_a; - ae_vector_init_copy(&_c, c, _state); - c = &_c; - *info = 0; - *threshold = 0; - *pal = 0; - *pbl = 0; - *par = 0; - *pbr = 0; - *cve = 0; - ae_vector_init(&ties, 0, DT_INT, _state); - ae_vector_init(&p1, 0, DT_INT, _state); - ae_vector_init(&p2, 0, DT_INT, _state); - - - /* - * Test for errors in inputs - */ - if( n<=0 ) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - *info = -1; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - for(i=0; i<=n-1; i++) - { - if( c->ptr.p_int[i]!=0&&c->ptr.p_int[i]!=1 ) - { - *info = -2; - ae_frame_leave(_state); - return; - } - } - *info = 1; - - /* - * Tie - */ - dstie(a, n, &ties, &tiecount, &p1, &p2, _state); - for(i=0; i<=n-1; i++) - { - if( p2.ptr.p_int[i]!=i ) - { - t = c->ptr.p_int[i]; - c->ptr.p_int[i] = c->ptr.p_int[p2.ptr.p_int[i]]; - c->ptr.p_int[p2.ptr.p_int[i]] = t; - } - } - - /* - * Special case: number of ties is 1. - * - * NOTE: we assume that P[i,j] equals to 0 or 1, - * intermediate values are not allowed. - */ - if( tiecount==1 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizersetkmeanslimits(const_cast(s.c_ptr()), restarts, maxits, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function sets k-means initialization algorithm. Several different +algorithms can be chosen, including k-means++. + +INPUT PARAMETERS: + S - clusterizer state, initialized by ClusterizerCreate() + InitAlgo- initialization algorithm: + * 0 automatic selection ( different versions of ALGLIB + may select different algorithms) + * 1 random initialization + * 2 k-means++ initialization (best quality of initial + centers, but long non-parallelizable initialization + phase with bad cache locality) + * 3 "fast-greedy" algorithm with efficient, easy to + parallelize initialization. Quality of initial centers + is somewhat worse than that of k-means++. This + algorithm is a default one in the current version of + ALGLIB. + *-1 "debug" algorithm which always selects first K rows + of dataset; this algorithm is used for debug purposes + only. Do not use it in the industrial code! 
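For instance, forcing classic k-means++ seeding instead of the default fast-greedy scheme (sketch, assuming an already configured clusterizer state s):

    alglib::clusterizersetkmeansinit(s, 2);   // 2 = k-means++ initialization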
+ + -- ALGLIB -- + Copyright 21.01.2015 by Bochkanov Sergey +*************************************************************************/ +void clusterizersetkmeansinit(const clusterizerstate &s, const ae_int_t initalgo, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - *info = -3; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * General case, number of ties > 1 - * - * NOTE: we assume that P[i,j] equals to 0 or 1, - * intermediate values are not allowed. - */ - *pal = (double)(0); - *pbl = (double)(0); - *par = (double)(0); - *pbr = (double)(0); - for(i=0; i<=n-1; i++) - { - if( c->ptr.p_int[i]==0 ) - { - *par = *par+1; - } - if( c->ptr.p_int[i]==1 ) - { - *pbr = *pbr+1; - } - } - koptimal = -1; - cvoptimal = ae_maxrealnumber; - for(k=0; k<=tiecount-2; k++) - { - - /* - * first, obtain information about K-th tie which is - * moved from R-part to L-part - */ - pak = (double)(0); - pbk = (double)(0); - for(i=ties.ptr.p_int[k]; i<=ties.ptr.p_int[k+1]-1; i++) - { - if( c->ptr.p_int[i]==0 ) - { - pak = pak+1; - } - if( c->ptr.p_int[i]==1 ) - { - pbk = pbk+1; - } - } - - /* - * Calculate cross-validation CE - */ - cv = (double)(0); - cv = cv-bdss_xlny(*pal+pak, (*pal+pak)/(*pal+pak+(*pbl)+pbk+1), _state); - cv = cv-bdss_xlny(*pbl+pbk, (*pbl+pbk)/(*pal+pak+1+(*pbl)+pbk), _state); - cv = cv-bdss_xlny(*par-pak, (*par-pak)/(*par-pak+(*pbr)-pbk+1), _state); - cv = cv-bdss_xlny(*pbr-pbk, (*pbr-pbk)/(*par-pak+1+(*pbr)-pbk), _state); - - /* - * Compare with best - */ - if( ae_fp_less(cv,cvoptimal) ) - { - cvoptimal = cv; - koptimal = k; - } - - /* - * update - */ - *pal = *pal+pak; - *pbl = *pbl+pbk; - *par = *par-pak; - *pbr = *pbr-pbk; - } - *cve = cvoptimal; - *threshold = 0.5*(a->ptr.p_double[ties.ptr.p_int[koptimal]]+a->ptr.p_double[ties.ptr.p_int[koptimal+1]]); - *pal = (double)(0); - *pbl = (double)(0); - *par = (double)(0); - *pbr = (double)(0); - for(i=0; i<=n-1; i++) - { - if( ae_fp_less(a->ptr.p_double[i],*threshold) ) - { - if( c->ptr.p_int[i]==0 ) - { - *pal = *pal+1; - } - else - { - *pbl = *pbl+1; - } - } - else - { - if( c->ptr.p_int[i]==0 ) - { - *par = *par+1; - } - else - { - *pbr = *pbr+1; - } - } - } - s = *pal+(*pbl); - *pal = *pal/s; - *pbl = *pbl/s; - s = *par+(*pbr); - *par = *par/s; - *pbr = *pbr/s; - ae_frame_leave(_state); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizersetkmeansinit(const_cast(s.c_ptr()), initalgo, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -Optimal partition, internal subroutine. Fast version. - -Accepts: - A array[0..N-1] array of attributes array[0..N-1] - C array[0..N-1] array of class labels - TiesBuf array[0..N] temporaries (ties) - CntBuf array[0..2*NC-1] temporaries (counts) - Alpha centering factor (0<=alpha<=1, recommended value - 0.05) - BufR array[0..N-1] temporaries - BufI array[0..N-1] temporaries +This function sets seed which is used to initialize internal RNG. By +default, deterministic seed is used - same for each run of clusterizer. 
If +you specify non-deterministic seed value, then some algorithms which +depend on random initialization (in current version: k-means) may return +slightly different results after each run. -Output: - Info error code (">0"=OK, "<0"=bad) - RMS training set RMS error - CVRMS leave-one-out RMS error - -Note: - content of all arrays is changed by subroutine; - it doesn't allocate temporaries. +INPUT PARAMETERS: + S - clusterizer state, initialized by ClusterizerCreate() + Seed - seed: + * positive values = use deterministic seed for each run of + algorithms which depend on random initialization + * zero or negative values = use non-deterministic seed -- ALGLIB -- - Copyright 11.12.2008 by Bochkanov Sergey + Copyright 08.06.2017 by Bochkanov Sergey *************************************************************************/ -void dsoptimalsplit2fast(/* Real */ ae_vector* a, - /* Integer */ ae_vector* c, - /* Integer */ ae_vector* tiesbuf, - /* Integer */ ae_vector* cntbuf, - /* Real */ ae_vector* bufr, - /* Integer */ ae_vector* bufi, - ae_int_t n, - ae_int_t nc, - double alpha, - ae_int_t* info, - double* threshold, - double* rms, - double* cvrms, - ae_state *_state) +void clusterizersetseed(const clusterizerstate &s, const ae_int_t seed, const xparams _xparams) { - ae_int_t i; - ae_int_t k; - ae_int_t cl; - ae_int_t tiecount; - double cbest; - double cc; - ae_int_t koptimal; - ae_int_t sl; - ae_int_t sr; - double v; - double w; - double x; - - *info = 0; - *threshold = 0; - *rms = 0; - *cvrms = 0; - - - /* - * Test for errors in inputs - */ - if( n<=0||nc<2 ) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - *info = -1; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - for(i=0; i<=n-1; i++) - { - if( c->ptr.p_int[i]<0||c->ptr.p_int[i]>=nc ) - { - *info = -2; - return; - } - } - *info = 1; - - /* - * Tie - */ - dstiefasti(a, c, n, tiesbuf, &tiecount, bufr, bufi, _state); - - /* - * Special case: number of ties is 1. - */ - if( tiecount==1 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizersetseed(const_cast(s.c_ptr()), seed, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function performs agglomerative hierarchical clustering + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +NOTE: Agglomerative hierarchical clustering algorithm has two phases: + distance matrix calculation and clustering itself. Only first phase + (distance matrix calculation) is accelerated by Intel MKL and + multithreading. Thus, acceleration is significant only for medium or + high-dimensional problems. 
+ + Although activating multithreading gives some speedup over single- + threaded execution, you should not expect nearly-linear scaling + with respect to cores count. + +INPUT PARAMETERS: + S - clusterizer state, initialized by ClusterizerCreate() + +OUTPUT PARAMETERS: + Rep - clustering results; see description of AHCReport + structure for more information. + +NOTE 1: hierarchical clustering algorithms require large amounts of memory. + In particular, this implementation needs sizeof(double)*NPoints^2 + bytes, which are used to store distance matrix. In case we work + with user-supplied matrix, this amount is multiplied by 2 (we have + to store original matrix and to work with its copy). + + For example, problem with 10000 points would require 800M of RAM, + even when working in a 1-dimensional space. + + -- ALGLIB -- + Copyright 10.07.2012 by Bochkanov Sergey +*************************************************************************/ +void clusterizerrunahc(const clusterizerstate &s, ahcreport &rep, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - *info = -3; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * General case, number of ties > 1 - */ - for(i=0; i<=2*nc-1; i++) - { - cntbuf->ptr.p_int[i] = 0; - } - for(i=0; i<=n-1; i++) - { - cntbuf->ptr.p_int[nc+c->ptr.p_int[i]] = cntbuf->ptr.p_int[nc+c->ptr.p_int[i]]+1; - } - koptimal = -1; - *threshold = a->ptr.p_double[n-1]; - cbest = ae_maxrealnumber; - sl = 0; - sr = n; - for(k=0; k<=tiecount-2; k++) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizerrunahc(const_cast(s.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function performs clustering by k-means++ algorithm. + +You may change algorithm properties by calling: +* ClusterizerSetKMeansLimits() to change number of restarts or iterations +* ClusterizerSetKMeansInit() to change initialization algorithm + +By default, one restart and unlimited number of iterations are used. +Initialization algorithm is chosen automatically. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +NOTE: k-means clustering algorithm has two phases: selection of initial + centers and clustering itself. ALGLIB parallelizes both phases. + Parallel version is optimized for the following scenario: medium or + high-dimensional problem (8 or more dimensions) with large number of + points and clusters. However, some speed-up can be obtained even + when assumptions above are violated. 
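
A short usage sketch for the agglomerative path wrapped above (create a clusterizer, attach points, pick a linkage, run AHC). The clusterizersetahcalgo() codes are taken from the docstring further down in this diff; the dataset and the dendrogram-indexing comment are illustrative assumptions.

    #include <cstdio>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // Small illustrative dataset: 5 points, 2 features.
        real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";

        clusterizerstate s;
        ahcreport rep;

        clusterizercreate(s);
        clusterizersetpoints(s, xy, 2);   // DistType=2: Euclidean
        clusterizersetahcalgo(s, 1);      // 1 = single linkage (0, complete linkage, is the default)
        clusterizerrunahc(s, rep);

        // Rep.Z has NPoints-1 rows; row I describes the I-th merge of the dendrogram.
        printf("first merge joins clusters %d and %d\n", int(rep.z[0][0]), int(rep.z[0][1]));
        return 0;
    }
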
+ +INPUT PARAMETERS: + S - clusterizer state, initialized by ClusterizerCreate() + K - number of clusters, K>=0. + K can be zero only when algorithm is called for empty + dataset, in this case completion code is set to + success (+1). + If K=0 and dataset size is non-zero, we can not + meaningfully assign points to some center (there are no + centers because K=0) and return -3 as completion code + (failure). + +OUTPUT PARAMETERS: + Rep - clustering results; see description of KMeansReport + structure for more information. + +NOTE 1: k-means clustering can be performed only for datasets with + Euclidean distance function. Algorithm will return negative + completion code in Rep.TerminationType in case dataset was added + to clusterizer with DistType other than Euclidean (or dataset was + specified by distance matrix instead of explicitly given points). + +NOTE 2: by default, k-means uses non-deterministic seed to initialize RNG + which is used to select initial centers. As result, each run of + algorithm may return different values. If you need deterministic + behavior, use ClusterizerSetSeed() function. + + -- ALGLIB -- + Copyright 10.07.2012 by Bochkanov Sergey +*************************************************************************/ +void clusterizerrunkmeans(const clusterizerstate &s, const ae_int_t k, kmeansreport &rep, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * first, move Kth tie from right to left - */ - for(i=tiesbuf->ptr.p_int[k]; i<=tiesbuf->ptr.p_int[k+1]-1; i++) - { - cl = c->ptr.p_int[i]; - cntbuf->ptr.p_int[cl] = cntbuf->ptr.p_int[cl]+1; - cntbuf->ptr.p_int[nc+cl] = cntbuf->ptr.p_int[nc+cl]-1; - } - sl = sl+(tiesbuf->ptr.p_int[k+1]-tiesbuf->ptr.p_int[k]); - sr = sr-(tiesbuf->ptr.p_int[k+1]-tiesbuf->ptr.p_int[k]); - - /* - * Calculate RMS error - */ - v = (double)(0); - for(i=0; i<=nc-1; i++) - { - w = (double)(cntbuf->ptr.p_int[i]); - v = v+w*ae_sqr(w/sl-1, _state); - v = v+(sl-w)*ae_sqr(w/sl, _state); - w = (double)(cntbuf->ptr.p_int[nc+i]); - v = v+w*ae_sqr(w/sr-1, _state); - v = v+(sr-w)*ae_sqr(w/sr, _state); - } - v = ae_sqrt(v/(nc*n), _state); - - /* - * Compare with best - */ - x = (double)(2*sl)/(double)(sl+sr)-1; - cc = v*(1-alpha+alpha*ae_sqr(x, _state)); - if( ae_fp_less(cc,cbest) ) - { - - /* - * store split - */ - *rms = v; - koptimal = k; - cbest = cc; - - /* - * calculate CVRMS error - */ - *cvrms = (double)(0); - for(i=0; i<=nc-1; i++) - { - if( sl>1 ) - { - w = (double)(cntbuf->ptr.p_int[i]); - *cvrms = *cvrms+w*ae_sqr((w-1)/(sl-1)-1, _state); - *cvrms = *cvrms+(sl-w)*ae_sqr(w/(sl-1), _state); - } - else - { - w = (double)(cntbuf->ptr.p_int[i]); - *cvrms = *cvrms+w*ae_sqr((double)1/(double)nc-1, _state); - *cvrms = *cvrms+(sl-w)*ae_sqr((double)1/(double)nc, _state); - } - if( sr>1 ) - { - w = (double)(cntbuf->ptr.p_int[nc+i]); - *cvrms = *cvrms+w*ae_sqr((w-1)/(sr-1)-1, _state); - *cvrms = *cvrms+(sr-w)*ae_sqr(w/(sr-1), _state); - } - else - { - w = (double)(cntbuf->ptr.p_int[nc+i]); - *cvrms = *cvrms+w*ae_sqr((double)1/(double)nc-1, _state); - *cvrms = *cvrms+(sr-w)*ae_sqr((double)1/(double)nc, _state); - } - } - *cvrms = ae_sqrt(*cvrms/(nc*n), _state); - } +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } - - /* - * Calculate threshold. 
- * Code is a bit complicated because there can be such - * numbers that 0.5(A+B) equals to A or B (if A-B=epsilon) - */ - *threshold = 0.5*(a->ptr.p_double[tiesbuf->ptr.p_int[koptimal]]+a->ptr.p_double[tiesbuf->ptr.p_int[koptimal+1]]); - if( ae_fp_less_eq(*threshold,a->ptr.p_double[tiesbuf->ptr.p_int[koptimal]]) ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::clusterizerrunkmeans(const_cast(s.c_ptr()), k, const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function returns distance matrix for dataset + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + XY - array[NPoints,NFeatures], dataset + NPoints - number of points, >=0 + NFeatures- number of features, >=1 + DistType- distance function: + * 0 Chebyshev distance (L-inf norm) + * 1 city block distance (L1 norm) + * 2 Euclidean distance (L2 norm, non-squared) + * 10 Pearson correlation: + dist(a,b) = 1-corr(a,b) + * 11 Absolute Pearson correlation: + dist(a,b) = 1-|corr(a,b)| + * 12 Uncentered Pearson correlation (cosine of the angle): + dist(a,b) = a'*b/(|a|*|b|) + * 13 Absolute uncentered Pearson correlation + dist(a,b) = |a'*b|/(|a|*|b|) + * 20 Spearman rank correlation: + dist(a,b) = 1-rankcorr(a,b) + * 21 Absolute Spearman rank correlation + dist(a,b) = 1-|rankcorr(a,b)| + +OUTPUT PARAMETERS: + D - array[NPoints,NPoints], distance matrix + (full matrix is returned, with lower and upper triangles) + +NOTE: different distance functions have different performance penalty: + * Euclidean or Pearson correlation distances are the fastest ones + * Spearman correlation distance function is a bit slower + * city block and Chebyshev distances are order of magnitude slower + + The reason behing difference in performance is that correlation-based + distance functions are computed using optimized linear algebra kernels, + while Chebyshev and city block distance functions are computed using + simple nested loops with two branches at each iteration. 
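
The distance-matrix helper documented above is easy to sanity-check on a tiny dataset. A sketch, with toy data and the standard "dataanalysis.h" header assumed:

    #include <cstdio>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[0,0],[3,4],[6,8]]";   // 3 points, 2 features
        real_2d_array d;

        // DistType=2 requests plain (non-squared) Euclidean distances; the full
        // symmetric NPoints x NPoints matrix is returned in d.
        clusterizergetdistances(xy, 3, 2, 2, d);

        printf("d(0,1)=%.1f, d(0,2)=%.1f\n", d[0][1], d[0][2]);   // expect 5.0 and 10.0
        return 0;
    }
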
+
+  -- ALGLIB --
+     Copyright 10.07.2012 by Bochkanov Sergey
+*************************************************************************/
+void clusterizergetdistances(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nfeatures, const ae_int_t disttype, real_2d_array &d, const xparams _xparams)
+{
+    jmp_buf _break_jump;
+    alglib_impl::ae_state _alglib_env_state;
+    alglib_impl::ae_state_init(&_alglib_env_state);
+    if( setjmp(_break_jump) )
     {
-        *threshold = a->ptr.p_double[tiesbuf->ptr.p_int[koptimal+1]];
+#if !defined(AE_NO_EXCEPTIONS)
+        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
+#else
+        _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg);
+        return;
+#endif
     }
+    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
+    if( _xparams.flags!=0x0 )
+        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
+    alglib_impl::clusterizergetdistances(const_cast<alglib_impl::ae_matrix*>(xy.c_ptr()), npoints, nfeatures, disttype, const_cast<alglib_impl::ae_matrix*>(d.c_ptr()), &_alglib_env_state);
+    alglib_impl::ae_state_clear(&_alglib_env_state);
+    return;
 }
+/*************************************************************************
+This function takes as input clusterization report Rep, desired clusters
+count K, and builds top K clusters from hierarchical clusterization tree.
+It returns assignment of points to clusters (array of cluster indexes).
+
+INPUT PARAMETERS:
+    Rep     -   report from ClusterizerRunAHC() performed on XY
+    K       -   desired number of clusters, 1<=K<=NPoints.
+                K can be zero only when NPoints=0.
+
+OUTPUT PARAMETERS:
+    CIdx    -   array[NPoints], I-th element contains cluster index (from
+                0 to K-1) for I-th point of the dataset.
+    CZ      -   array[K]. This array allows to convert cluster indexes
+                returned by this function to indexes used by Rep.Z. J-th
+                cluster returned by this function corresponds to CZ[J]-th
+                cluster stored in Rep.Z/PZ/PM.
+                It is guaranteed that CZ[I]<CZ[I+1].
+*************************************************************************/
+void clusterizergetkclusters(const ahcreport &rep, const ae_int_t k, integer_1d_array &cidx, integer_1d_array &cz, const xparams _xparams)
+{
+    jmp_buf _break_jump;
+    alglib_impl::ae_state _alglib_env_state;
+    alglib_impl::ae_state_init(&_alglib_env_state);
+    if( setjmp(_break_jump) )
+    {
+#if !defined(AE_NO_EXCEPTIONS)
+        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
+#else
+        _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg);
+        return;
+#endif
+    }
+    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
+    if( _xparams.flags!=0x0 )
+        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
+    alglib_impl::clusterizergetkclusters(const_cast<alglib_impl::ahcreport*>(rep.c_ptr()), k, const_cast<alglib_impl::ae_vector*>(cidx.c_ptr()), const_cast<alglib_impl::ae_vector*>(cz.c_ptr()), &_alglib_env_state);
+    alglib_impl::ae_state_clear(&_alglib_env_state);
+    return;
+}
 /*************************************************************************
+This function accepts AHC report Rep, desired minimum intercluster
+distance and returns top clusters from hierarchical clusterization tree
+which are separated by distance R or HIGHER.
+
+It returns assignment of points to clusters (array of cluster indexes).
+
+There is one more function with similar name - ClusterizerSeparatedByCorr,
+which returns clusters with intercluster correlation equal to R or LOWER
+(note: higher for distance, lower for correlation).
+
+INPUT PARAMETERS:
+    Rep     -   report from ClusterizerRunAHC() performed on XY
+    R       -   desired minimum intercluster distance, R>=0
+
+OUTPUT PARAMETERS:
+    K       -   number of clusters, 1<=K<=NPoints
+    CIdx    -   array[NPoints], I-th element contains cluster index (from
+                0 to K-1) for I-th point of the dataset.
+    CZ      -   array[K]. This array allows to convert cluster indexes
+                returned by this function to indexes used by Rep.Z. J-th
+                cluster returned by this function corresponds to CZ[J]-th
+                cluster stored in Rep.Z/PZ/PM.
+                It is guaranteed that CZ[I]<CZ[I+1].
+*************************************************************************/
+void clusterizerseparatedbydist(const ahcreport &rep, const double r, ae_int_t &k, integer_1d_array &cidx, integer_1d_array &cz, const xparams _xparams)
+{
+    jmp_buf _break_jump;
+    alglib_impl::ae_state _alglib_env_state;
+    alglib_impl::ae_state_init(&_alglib_env_state);
+    if( setjmp(_break_jump) )
+    {
+#if !defined(AE_NO_EXCEPTIONS)
+        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
+#else
+        _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg);
+        return;
+#endif
+    }
+    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
+    if( _xparams.flags!=0x0 )
+        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
+    alglib_impl::clusterizerseparatedbydist(const_cast<alglib_impl::ahcreport*>(rep.c_ptr()), r, &k, const_cast<alglib_impl::ae_vector*>(cidx.c_ptr()), const_cast<alglib_impl::ae_vector*>(cz.c_ptr()), &_alglib_env_state);
+    alglib_impl::ae_state_clear(&_alglib_env_state);
+    return;
+}

-    ae_frame_make(_state, &_frame_block);
-    ae_vector_init_copy(&_a, a, _state);
-    a = &_a;
-    ae_vector_init_copy(&_c, c, _state);
-    c = &_c;
-    *info = 0;
-    ae_vector_clear(thresholds);
-    *ni = 0;
-    *cve = 0;
-    ae_vector_init(&ties, 0, DT_INT, _state);
-    ae_vector_init(&p1, 0, DT_INT, _state);
-    ae_vector_init(&p2, 0, DT_INT, _state);
-    ae_vector_init(&cnt, 0, DT_INT, _state);
-    ae_vector_init(&bestsizes, 0, DT_INT, _state);
-    ae_vector_init(&cursizes, 0, DT_INT, _state);
+/*************************************************************************
+This function accepts AHC report Rep, desired maximum intercluster
+correlation and returns top clusters from hierarchical clusterization tree
+which are separated by correlation R or LOWER.
-
-    /*
-     * Test for errors in inputs
-     */
-    if( (n<=0||nc<2)||kmax<2 )
+It returns assignment of points to clusters (array of cluster indexes).
+
+There is one more function with similar name - ClusterizerSeparatedByDist,
+which returns clusters with intercluster distance equal to R or HIGHER
+(note: higher for distance, lower for correlation).
+
+INPUT PARAMETERS:
+    Rep     -   report from ClusterizerRunAHC() performed on XY
+    R       -   desired maximum intercluster correlation, -1<=R<=+1
+
+OUTPUT PARAMETERS:
+    K       -   number of clusters, 1<=K<=NPoints
+    CIdx    -   array[NPoints], I-th element contains cluster index (from
+                0 to K-1) for I-th point of the dataset.
+    CZ      -   array[K]. This array allows to convert cluster indexes
+                returned by this function to indexes used by Rep.Z. J-th
+                cluster returned by this function corresponds to CZ[J]-th
+                cluster stored in Rep.Z/PZ/PM.
+                It is guaranteed that CZ[I]<CZ[I+1].
+*************************************************************************/
+void clusterizerseparatedbycorr(const ahcreport &rep, const double r, ae_int_t &k, integer_1d_array &cidx, integer_1d_array &cz, const xparams _xparams)
+{
+    jmp_buf _break_jump;
+    alglib_impl::ae_state _alglib_env_state;
+    alglib_impl::ae_state_init(&_alglib_env_state);
+    if( setjmp(_break_jump) )
+    {
+#if !defined(AE_NO_EXCEPTIONS)
+        _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg);
+#else
+        _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg);
+        return;
+#endif
+    }
+    ae_state_set_break_jump(&_alglib_env_state, &_break_jump);
+    if( _xparams.flags!=0x0 )
+        ae_state_set_flags(&_alglib_env_state, _xparams.flags);
+    alglib_impl::clusterizerseparatedbycorr(const_cast<alglib_impl::ahcreport*>(rep.c_ptr()), r, &k, const_cast<alglib_impl::ae_vector*>(cidx.c_ptr()), const_cast<alglib_impl::ae_vector*>(cz.c_ptr()), &_alglib_env_state);
+    alglib_impl::ae_state_clear(&_alglib_env_state);
+    return;
+}
+#endif
+
+#if defined(AE_COMPILE_DFOREST) || !defined(AE_PARTIAL_BUILD)
+/*************************************************************************
+A random forest (decision forest) builder object.
+
+Used to store dataset and specify decision forest training algorithm settings.
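
A sketch of the two ways of flattening an AHC dendrogram that the wrappers above expose: cutting it into exactly K clusters, or cutting it by an intercluster-distance threshold. The dataset is illustrative; function names and argument order follow the docstrings above.

    #include <cstdio>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        real_2d_array xy = "[[1,1],[1,2],[4,1],[2,3],[4,1.5]]";

        clusterizerstate s;
        ahcreport rep;
        clusterizercreate(s);
        clusterizersetpoints(s, xy, 2);   // Euclidean distance
        clusterizerrunahc(s, rep);        // default linkage (complete)

        // Variant 1: cut the dendrogram into exactly K=2 flat clusters.
        integer_1d_array cidx, cz;
        clusterizergetkclusters(rep, 2, cidx, cz);
        printf("point 0 -> flat cluster %d (cluster %d in Rep.Z numbering)\n",
               int(cidx[0]), int(cz[cidx[0]]));

        // Variant 2: keep merging until intercluster distance reaches 1.5,
        // i.e. clusters separated by 1.5 or more stay apart.
        ae_int_t k;
        clusterizerseparatedbydist(rep, 1.5, k, cidx, cz);
        printf("distance cut produced %d clusters\n", int(k));
        return 0;
    }
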
+*************************************************************************/ +_decisionforestbuilder_owner::_decisionforestbuilder_owner() +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - if( c->ptr.p_int[i]<0||c->ptr.p_int[i]>=nc ) + if( p_struct!=NULL ) { - *info = -2; - ae_frame_leave(_state); - return; + alglib_impl::_decisionforestbuilder_destroy(p_struct); + alglib_impl::ae_free(p_struct); } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } - *info = 1; + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::decisionforestbuilder*)alglib_impl::ae_malloc(sizeof(alglib_impl::decisionforestbuilder), &_state); + memset(p_struct, 0, sizeof(alglib_impl::decisionforestbuilder)); + alglib_impl::_decisionforestbuilder_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_decisionforestbuilder_owner::_decisionforestbuilder_owner(const _decisionforestbuilder_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * Tie - */ - dstie(a, n, &ties, &tiecount, &p1, &p2, _state); - for(i=0; i<=n-1; i++) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - if( p2.ptr.p_int[i]!=i ) + if( p_struct!=NULL ) { - k = c->ptr.p_int[i]; - c->ptr.p_int[i] = c->ptr.p_int[p2.ptr.p_int[i]]; - c->ptr.p_int[p2.ptr.p_int[i]] = k; + alglib_impl::_decisionforestbuilder_destroy(p_struct); + alglib_impl::ae_free(p_struct); } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: decisionforestbuilder copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::decisionforestbuilder*)alglib_impl::ae_malloc(sizeof(alglib_impl::decisionforestbuilder), &_state); + memset(p_struct, 0, sizeof(alglib_impl::decisionforestbuilder)); + alglib_impl::_decisionforestbuilder_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} + +_decisionforestbuilder_owner& _decisionforestbuilder_owner::operator=(const _decisionforestbuilder_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * Special cases - */ - if( tiecount==1 ) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - *info = -3; - ae_frame_leave(_state); - return; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: decisionforestbuilder assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: decisionforestbuilder assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_decisionforestbuilder_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::decisionforestbuilder)); + alglib_impl::_decisionforestbuilder_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + 
+_decisionforestbuilder_owner::~_decisionforestbuilder_owner() +{ + if( p_struct!=NULL ) + { + alglib_impl::_decisionforestbuilder_destroy(p_struct); + ae_free(p_struct); + } +} + +alglib_impl::decisionforestbuilder* _decisionforestbuilder_owner::c_ptr() +{ + return p_struct; +} + +alglib_impl::decisionforestbuilder* _decisionforestbuilder_owner::c_ptr() const +{ + return const_cast(p_struct); +} +decisionforestbuilder::decisionforestbuilder() : _decisionforestbuilder_owner() +{ +} + +decisionforestbuilder::decisionforestbuilder(const decisionforestbuilder &rhs):_decisionforestbuilder_owner(rhs) +{ +} + +decisionforestbuilder& decisionforestbuilder::operator=(const decisionforestbuilder &rhs) +{ + if( this==&rhs ) + return *this; + _decisionforestbuilder_owner::operator=(rhs); + return *this; +} + +decisionforestbuilder::~decisionforestbuilder() +{ +} + + +/************************************************************************* +Buffer object which is used to perform various requests (usually model +inference) in the multithreaded mode (multiple threads working with same +DF object). + +This object should be created with DFCreateBuffer(). +*************************************************************************/ +_decisionforestbuffer_owner::_decisionforestbuffer_owner() +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * General case: - * 0. allocate arrays - */ - kmax = ae_minint(kmax, tiecount, _state); - ae_vector_set_length(&bestsizes, kmax-1+1, _state); - ae_vector_set_length(&cursizes, kmax-1+1, _state); - ae_vector_set_length(&cnt, nc-1+1, _state); - - /* - * General case: - * 1. prepare "weak" solution (two subintervals, divided at median) - */ - v2 = ae_maxrealnumber; - j = -1; - for(i=1; i<=tiecount-1; i++) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - if( ae_fp_less(ae_fabs(ties.ptr.p_int[i]-0.5*(n-1), _state),v2) ) + if( p_struct!=NULL ) { - v2 = ae_fabs(ties.ptr.p_int[i]-0.5*n, _state); - j = i; + alglib_impl::_decisionforestbuffer_destroy(p_struct); + alglib_impl::ae_free(p_struct); } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } - ae_assert(j>0, "DSSplitK: internal error #1!", _state); - bestk = 2; - bestsizes.ptr.p_int[0] = ties.ptr.p_int[j]; - bestsizes.ptr.p_int[1] = n-j; - bestcve = (double)(0); - for(i=0; i<=nc-1; i++) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::decisionforestbuffer*)alglib_impl::ae_malloc(sizeof(alglib_impl::decisionforestbuffer), &_state); + memset(p_struct, 0, sizeof(alglib_impl::decisionforestbuffer)); + alglib_impl::_decisionforestbuffer_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_decisionforestbuffer_owner::_decisionforestbuffer_owner(const _decisionforestbuffer_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - cnt.ptr.p_int[i] = 0; + if( p_struct!=NULL ) + { + alglib_impl::_decisionforestbuffer_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } - for(i=0; i<=j-1; i++) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: decisionforestbuffer copy constructor 
failure (source is not initialized)", &_state); + p_struct = (alglib_impl::decisionforestbuffer*)alglib_impl::ae_malloc(sizeof(alglib_impl::decisionforestbuffer), &_state); + memset(p_struct, 0, sizeof(alglib_impl::decisionforestbuffer)); + alglib_impl::_decisionforestbuffer_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} + +_decisionforestbuffer_owner& _decisionforestbuffer_owner::operator=(const _decisionforestbuffer_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - bdss_tieaddc(c, &ties, i, nc, &cnt, _state); - } - bestcve = bestcve+bdss_getcv(&cnt, nc, _state); - for(i=0; i<=nc-1; i++) - { - cnt.ptr.p_int[i] = 0; - } - for(i=j; i<=tiecount-1; i++) - { - bdss_tieaddc(c, &ties, i, nc, &cnt, _state); - } - bestcve = bestcve+bdss_getcv(&cnt, nc, _state); - - /* - * General case: - * 2. Use greedy algorithm to find sub-optimal split in O(KMax*N) time - */ - for(k=2; k<=kmax; k++) - { - - /* - * Prepare greedy K-interval split - */ - for(i=0; i<=k-1; i++) - { - cursizes.ptr.p_int[i] = 0; - } - i = 0; - j = 0; - while(j<=tiecount-1&&i<=k-1) - { - - /* - * Rule: I-th bin is empty, fill it - */ - if( cursizes.ptr.p_int[i]==0 ) - { - cursizes.ptr.p_int[i] = ties.ptr.p_int[j+1]-ties.ptr.p_int[j]; - j = j+1; - continue; - } - - /* - * Rule: (K-1-I) bins left, (K-1-I) ties left (1 tie per bin); next bin - */ - if( tiecount-j==k-1-i ) - { - i = i+1; - continue; - } - - /* - * Rule: last bin, always place in current - */ - if( i==k-1 ) - { - cursizes.ptr.p_int[i] = cursizes.ptr.p_int[i]+ties.ptr.p_int[j+1]-ties.ptr.p_int[j]; - j = j+1; - continue; - } - - /* - * Place J-th tie in I-th bin, or leave for I+1-th bin. 
- */ - if( ae_fp_less(ae_fabs(cursizes.ptr.p_int[i]+ties.ptr.p_int[j+1]-ties.ptr.p_int[j]-(double)n/(double)k, _state),ae_fabs(cursizes.ptr.p_int[i]-(double)n/(double)k, _state)) ) - { - cursizes.ptr.p_int[i] = cursizes.ptr.p_int[i]+ties.ptr.p_int[j+1]-ties.ptr.p_int[j]; - j = j+1; - } - else - { - i = i+1; - } - } - ae_assert(cursizes.ptr.p_int[k-1]!=0&&j==tiecount, "DSSplitK: internal error #1", _state); - - /* - * Calculate CVE - */ - curcve = (double)(0); - j = 0; - for(i=0; i<=k-1; i++) - { - for(j1=0; j1<=nc-1; j1++) - { - cnt.ptr.p_int[j1] = 0; - } - for(j1=j; j1<=j+cursizes.ptr.p_int[i]-1; j1++) - { - cnt.ptr.p_int[c->ptr.p_int[j1]] = cnt.ptr.p_int[c->ptr.p_int[j1]]+1; - } - curcve = curcve+bdss_getcv(&cnt, nc, _state); - j = j+cursizes.ptr.p_int[i]; - } - - /* - * Choose best variant - */ - if( ae_fp_less(curcve,bestcve) ) - { - for(i=0; i<=k-1; i++) - { - bestsizes.ptr.p_int[i] = cursizes.ptr.p_int[i]; - } - bestcve = curcve; - bestk = k; - } +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - - /* - * Transform from sizes to thresholds - */ - *cve = bestcve; - *ni = bestk; - ae_vector_set_length(thresholds, *ni-2+1, _state); - j = bestsizes.ptr.p_int[0]; - for(i=1; i<=bestk-1; i++) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: decisionforestbuffer assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: decisionforestbuffer assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_decisionforestbuffer_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::decisionforestbuffer)); + alglib_impl::_decisionforestbuffer_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + +_decisionforestbuffer_owner::~_decisionforestbuffer_owner() +{ + if( p_struct!=NULL ) { - thresholds->ptr.p_double[i-1] = 0.5*(a->ptr.p_double[j-1]+a->ptr.p_double[j]); - j = j+bestsizes.ptr.p_int[i]; + alglib_impl::_decisionforestbuffer_destroy(p_struct); + ae_free(p_struct); } - ae_frame_leave(_state); } +alglib_impl::decisionforestbuffer* _decisionforestbuffer_owner::c_ptr() +{ + return p_struct; +} -/************************************************************************* -Automatic optimal discretization, internal subroutine. 
+alglib_impl::decisionforestbuffer* _decisionforestbuffer_owner::c_ptr() const +{ + return const_cast(p_struct); +} +decisionforestbuffer::decisionforestbuffer() : _decisionforestbuffer_owner() +{ +} - -- ALGLIB -- - Copyright 22.05.2008 by Bochkanov Sergey -*************************************************************************/ -void dsoptimalsplitk(/* Real */ ae_vector* a, - /* Integer */ ae_vector* c, - ae_int_t n, - ae_int_t nc, - ae_int_t kmax, - ae_int_t* info, - /* Real */ ae_vector* thresholds, - ae_int_t* ni, - double* cve, - ae_state *_state) +decisionforestbuffer::decisionforestbuffer(const decisionforestbuffer &rhs):_decisionforestbuffer_owner(rhs) { - ae_frame _frame_block; - ae_vector _a; - ae_vector _c; - ae_int_t i; - ae_int_t j; - ae_int_t s; - ae_int_t jl; - ae_int_t jr; - double v2; - ae_vector ties; - ae_int_t tiecount; - ae_vector p1; - ae_vector p2; - double cvtemp; - ae_vector cnt; - ae_vector cnt2; - ae_matrix cv; - ae_matrix splits; - ae_int_t k; - ae_int_t koptimal; - double cvoptimal; +} - ae_frame_make(_state, &_frame_block); - ae_vector_init_copy(&_a, a, _state); - a = &_a; - ae_vector_init_copy(&_c, c, _state); - c = &_c; - *info = 0; - ae_vector_clear(thresholds); - *ni = 0; - *cve = 0; - ae_vector_init(&ties, 0, DT_INT, _state); - ae_vector_init(&p1, 0, DT_INT, _state); - ae_vector_init(&p2, 0, DT_INT, _state); - ae_vector_init(&cnt, 0, DT_INT, _state); - ae_vector_init(&cnt2, 0, DT_INT, _state); - ae_matrix_init(&cv, 0, 0, DT_REAL, _state); - ae_matrix_init(&splits, 0, 0, DT_INT, _state); +decisionforestbuffer& decisionforestbuffer::operator=(const decisionforestbuffer &rhs) +{ + if( this==&rhs ) + return *this; + _decisionforestbuffer_owner::operator=(rhs); + return *this; +} +decisionforestbuffer::~decisionforestbuffer() +{ +} + + +/************************************************************************* +Decision forest (random forest) model. 
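
A sketch of the intended workflow for the buffer object defined above when several threads run inference against one shared model. dfcreatebuffer() is documented later in this diff; the name dftsprocess() for the buffer-aware inference call is an assumption (it does not appear in this diff), as is the existence of a model trained or unserialized elsewhere - check the ALGLIB Reference Manual before relying on it.

    #include "dataanalysis.h"

    using namespace alglib;

    // Sketch: each worker thread owns one buffer, so inference on a shared,
    // read-only decisionforest does not contend on the model's internal storage.
    // ASSUMPTION: dftsprocess() is the buffer-aware counterpart of dfprocess();
    // 'df' is a model trained or unserialized elsewhere.
    void predict_in_worker(const decisionforest &df, const real_1d_array &x, real_1d_array &y)
    {
        decisionforestbuffer buf;
        dfcreatebuffer(df, buf);        // buffer is tied to this particular model
        dftsprocess(df, buf, x, y);     // y receives the prediction / class posteriors
    }
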
+*************************************************************************/ +_decisionforest_owner::_decisionforest_owner() +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * Test for errors in inputs - */ - if( (n<=0||nc<2)||kmax<2 ) - { - *info = -1; - ae_frame_leave(_state); - return; - } - for(i=0; i<=n-1; i++) - { - if( c->ptr.p_int[i]<0||c->ptr.p_int[i]>=nc ) - { - *info = -2; - ae_frame_leave(_state); - return; - } - } - *info = 1; - - /* - * Tie - */ - dstie(a, n, &ties, &tiecount, &p1, &p2, _state); - for(i=0; i<=n-1; i++) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - if( p2.ptr.p_int[i]!=i ) + if( p_struct!=NULL ) { - k = c->ptr.p_int[i]; - c->ptr.p_int[i] = c->ptr.p_int[p2.ptr.p_int[i]]; - c->ptr.p_int[p2.ptr.p_int[i]] = k; - } - } - - /* - * Special cases - */ - if( tiecount==1 ) - { - *info = -3; - ae_frame_leave(_state); + alglib_impl::_decisionforest_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::decisionforest*)alglib_impl::ae_malloc(sizeof(alglib_impl::decisionforest), &_state); + memset(p_struct, 0, sizeof(alglib_impl::decisionforest)); + alglib_impl::_decisionforest_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_decisionforest_owner::_decisionforest_owner(const _decisionforest_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * General case - * Use dynamic programming to find best split in O(KMax*NC*TieCount^2) time - */ - kmax = ae_minint(kmax, tiecount, _state); - ae_matrix_set_length(&cv, kmax-1+1, tiecount-1+1, _state); - ae_matrix_set_length(&splits, kmax-1+1, tiecount-1+1, _state); - ae_vector_set_length(&cnt, nc-1+1, _state); - ae_vector_set_length(&cnt2, nc-1+1, _state); - for(j=0; j<=nc-1; j++) - { - cnt.ptr.p_int[j] = 0; - } - for(j=0; j<=tiecount-1; j++) - { - bdss_tieaddc(c, &ties, j, nc, &cnt, _state); - splits.ptr.pp_int[0][j] = 0; - cv.ptr.pp_double[0][j] = bdss_getcv(&cnt, nc, _state); - } - for(k=1; k<=kmax-1; k++) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - for(j=0; j<=nc-1; j++) - { - cnt.ptr.p_int[j] = 0; - } - - /* - * Subtask size J in [K..TieCount-1]: - * optimal K-splitting on ties from 0-th to J-th. 
- */ - for(j=k; j<=tiecount-1; j++) + if( p_struct!=NULL ) { - - /* - * Update Cnt - let it contain classes of ties from K-th to J-th - */ - bdss_tieaddc(c, &ties, j, nc, &cnt, _state); - - /* - * Search for optimal split point S in [K..J] - */ - for(i=0; i<=nc-1; i++) - { - cnt2.ptr.p_int[i] = cnt.ptr.p_int[i]; - } - cv.ptr.pp_double[k][j] = cv.ptr.pp_double[k-1][j-1]+bdss_getcv(&cnt2, nc, _state); - splits.ptr.pp_int[k][j] = j; - for(s=k+1; s<=j; s++) - { - - /* - * Update Cnt2 - let it contain classes of ties from S-th to J-th - */ - bdss_tiesubc(c, &ties, s-1, nc, &cnt2, _state); - - /* - * Calculate CVE - */ - cvtemp = cv.ptr.pp_double[k-1][s-1]+bdss_getcv(&cnt2, nc, _state); - if( ae_fp_less(cvtemp,cv.ptr.pp_double[k][j]) ) - { - cv.ptr.pp_double[k][j] = cvtemp; - splits.ptr.pp_int[k][j] = s; - } - } - } + alglib_impl::_decisionforest_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: decisionforest copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::decisionforest*)alglib_impl::ae_malloc(sizeof(alglib_impl::decisionforest), &_state); + memset(p_struct, 0, sizeof(alglib_impl::decisionforest)); + alglib_impl::_decisionforest_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} + +_decisionforest_owner& _decisionforest_owner::operator=(const _decisionforest_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * Choose best partition, output result - */ - koptimal = -1; - cvoptimal = ae_maxrealnumber; - for(k=0; k<=kmax-1; k++) - { - if( ae_fp_less(cv.ptr.pp_double[k][tiecount-1],cvoptimal) ) - { - cvoptimal = cv.ptr.pp_double[k][tiecount-1]; - koptimal = k; - } - } - ae_assert(koptimal>=0, "DSOptimalSplitK: internal error #1!", _state); - if( koptimal==0 ) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - - /* - * Special case: best partition is one big interval. - * Even 2-partition is not better. - * This is possible when dealing with "weak" predictor variables. - * - * Make binary split as close to the median as possible. 
- */ - v2 = ae_maxrealnumber; - j = -1; - for(i=1; i<=tiecount-1; i++) - { - if( ae_fp_less(ae_fabs(ties.ptr.p_int[i]-0.5*(n-1), _state),v2) ) - { - v2 = ae_fabs(ties.ptr.p_int[i]-0.5*(n-1), _state); - j = i; - } - } - ae_assert(j>0, "DSOptimalSplitK: internal error #2!", _state); - ae_vector_set_length(thresholds, 0+1, _state); - thresholds->ptr.p_double[0] = 0.5*(a->ptr.p_double[ties.ptr.p_int[j-1]]+a->ptr.p_double[ties.ptr.p_int[j]]); - *ni = 2; - *cve = (double)(0); - for(i=0; i<=nc-1; i++) - { - cnt.ptr.p_int[i] = 0; - } - for(i=0; i<=j-1; i++) - { - bdss_tieaddc(c, &ties, i, nc, &cnt, _state); - } - *cve = *cve+bdss_getcv(&cnt, nc, _state); - for(i=0; i<=nc-1; i++) - { - cnt.ptr.p_int[i] = 0; - } - for(i=j; i<=tiecount-1; i++) - { - bdss_tieaddc(c, &ties, i, nc, &cnt, _state); - } - *cve = *cve+bdss_getcv(&cnt, nc, _state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - else + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: decisionforest assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: decisionforest assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_decisionforest_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::decisionforest)); + alglib_impl::_decisionforest_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + +_decisionforest_owner::~_decisionforest_owner() +{ + if( p_struct!=NULL ) { - - /* - * General case: 2 or more intervals - * - * NOTE: we initialize both JL and JR (left and right bounds), - * altough algorithm needs only JL. 
- */ - ae_vector_set_length(thresholds, koptimal-1+1, _state); - *ni = koptimal+1; - *cve = cv.ptr.pp_double[koptimal][tiecount-1]; - jl = splits.ptr.pp_int[koptimal][tiecount-1]; - jr = tiecount-1; - for(k=koptimal; k>=1; k--) - { - thresholds->ptr.p_double[k-1] = 0.5*(a->ptr.p_double[ties.ptr.p_int[jl-1]]+a->ptr.p_double[ties.ptr.p_int[jl]]); - jr = jl-1; - jl = splits.ptr.pp_int[k-1][jl-1]; - } - touchint(&jr, _state); + alglib_impl::_decisionforest_destroy(p_struct); + ae_free(p_struct); } - ae_frame_leave(_state); } +alglib_impl::decisionforest* _decisionforest_owner::c_ptr() +{ + return p_struct; +} -/************************************************************************* -Internal function -*************************************************************************/ -static double bdss_xlny(double x, double y, ae_state *_state) +alglib_impl::decisionforest* _decisionforest_owner::c_ptr() const { - double result; + return const_cast(p_struct); +} +decisionforest::decisionforest() : _decisionforest_owner() +{ +} +decisionforest::decisionforest(const decisionforest &rhs):_decisionforest_owner(rhs) +{ +} - if( ae_fp_eq(x,(double)(0)) ) - { - result = (double)(0); - } - else - { - result = x*ae_log(y, _state); - } - return result; +decisionforest& decisionforest::operator=(const decisionforest &rhs) +{ + if( this==&rhs ) + return *this; + _decisionforest_owner::operator=(rhs); + return *this; +} + +decisionforest::~decisionforest() +{ } /************************************************************************* -Internal function, -returns number of samples of class I in Cnt[I] -*************************************************************************/ -static double bdss_getcv(/* Integer */ ae_vector* cnt, - ae_int_t nc, - ae_state *_state) -{ - ae_int_t i; - double s; - double result; +Decision forest training report. +=== training/oob errors ================================================== - s = (double)(0); - for(i=0; i<=nc-1; i++) +Following fields store training set errors: +* relclserror - fraction of misclassified cases, [0,1] +* avgce - average cross-entropy in bits per symbol +* rmserror - root-mean-square error +* avgerror - average error +* avgrelerror - average relative error + +Out-of-bag estimates are stored in fields with same names, but "oob" prefix. + +For classification problems: +* RMS, AVG and AVGREL errors are calculated for posterior probabilities + +For regression problems: +* RELCLS and AVGCE errors are zero + +=== variable importance ================================================== + +Following fields are used to store variable importance information: + +* topvars - variables ordered from the most important to + less important ones (according to current + choice of importance raiting). + For example, topvars[0] contains index of the + most important variable, and topvars[0:2] are + indexes of 3 most important ones and so on. + +* varimportances - array[nvars], ratings (the larger, the more + important the variable is, always in [0,1] + range). + By default, filled by zeros (no importance + ratings are provided unless you explicitly + request them). + Zero rating means that variable is not important, + however you will rarely encounter such a thing, + in many cases unimportant variables produce + nearly-zero (but nonzero) ratings. 
+ +Variable importance report must be EXPLICITLY requested by calling: +* dfbuildersetimportancegini() function, if you need out-of-bag Gini-based + importance rating also known as MDI (fast to calculate, resistant to + overfitting issues, but has some bias towards continuous and + high-cardinality categorical variables) +* dfbuildersetimportancetrngini() function, if you need training set Gini- + -based importance rating (what other packages typically report). +* dfbuildersetimportancepermutation() function, if you need permutation- + based importance rating also known as MDA (slower to calculate, but less + biased) +* dfbuildersetimportancenone() function, if you do not need importance + ratings - ratings will be zero, topvars[] will be [0,1,2,...] + +Different importance ratings (Gini or permutation) produce non-comparable +values. Although in all cases rating values lie in [0,1] range, there are +exist differences: +* informally speaking, Gini importance rating tends to divide "unit amount + of importance" between several important variables, i.e. it produces + estimates which roughly sum to 1.0 (or less than 1.0, if your task can + not be solved exactly). If all variables are equally important, they + will have same rating, roughly 1/NVars, even if every variable is + critically important. +* from the other side, permutation importance tells us what percentage of + the model predictive power will be ruined by permuting this specific + variable. It does not produce estimates which sum to one. Critically + important variable will have rating close to 1.0, and you may have + multiple variables with such a rating. + +More information on variable importance ratings can be found in comments +on the dfbuildersetimportancegini() and dfbuildersetimportancepermutation() +functions. 
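
A sketch of requesting and reading one of the importance ratings named above. Only the function names appear in this diff; the exact parameter lists of dfbuildersetdataset() and dfbuilderbuildrandomforest(), the dataset layout (class label in the last column) and the toy data are assumptions.

    #include <cstdio>
    #include "dataanalysis.h"

    using namespace alglib;

    int main()
    {
        // Toy classification set: two variables, class label (0/1) in the last column.
        // Dataset layout and the exact parameter lists below are assumptions.
        real_2d_array xy = "[[0,0,0],[0,1,0],[0.1,0.2,0],[1,0,1],[1,1,1],[0.9,0.8,1]]";

        decisionforestbuilder s;
        decisionforest df;
        dfreport rep;

        dfbuildercreate(s);
        dfbuildersetdataset(s, xy, 6, 2, 2);        // 6 points, 2 vars, 2 classes
        dfbuildersetimportancetrngini(s);           // request training-set Gini (MDI) ratings
        dfbuilderbuildrandomforest(s, 50, df, rep); // 50 trees

        printf("most important variable: %d (rating %.3f)\n",
               int(rep.topvars[0]), rep.varimportances[rep.topvars[0]]);
        return 0;
    }
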
+*************************************************************************/ +_dfreport_owner::_dfreport_owner() +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - s = s+cnt->ptr.p_int[i]; + if( p_struct!=NULL ) + { + alglib_impl::_dfreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } - result = (double)(0); - for(i=0; i<=nc-1; i++) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::dfreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::dfreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::dfreport)); + alglib_impl::_dfreport_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_dfreport_owner::_dfreport_owner(const _dfreport_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - result = result-bdss_xlny((double)(cnt->ptr.p_int[i]), cnt->ptr.p_int[i]/(s+nc-1), _state); + if( p_struct!=NULL ) + { + alglib_impl::_dfreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } - return result; + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: dfreport copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::dfreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::dfreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::dfreport)); + alglib_impl::_dfreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); } - -/************************************************************************* -Internal function, adds number of samples of class I in tie NTie to Cnt[I] -*************************************************************************/ -static void bdss_tieaddc(/* Integer */ ae_vector* c, - /* Integer */ ae_vector* ties, - ae_int_t ntie, - ae_int_t nc, - /* Integer */ ae_vector* cnt, - ae_state *_state) +_dfreport_owner& _dfreport_owner::operator=(const _dfreport_owner &rhs) { - ae_int_t i; - - - for(i=ties->ptr.p_int[ntie]; i<=ties->ptr.p_int[ntie+1]-1; i++) + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - cnt->ptr.p_int[c->ptr.p_int[i]] = cnt->ptr.p_int[c->ptr.p_int[i]]+1; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: dfreport assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: dfreport assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_dfreport_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::dfreport)); + alglib_impl::_dfreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; } - 
-/************************************************************************* -Internal function, subtracts number of samples of class I in tie NTie to Cnt[I] -*************************************************************************/ -static void bdss_tiesubc(/* Integer */ ae_vector* c, - /* Integer */ ae_vector* ties, - ae_int_t ntie, - ae_int_t nc, - /* Integer */ ae_vector* cnt, - ae_state *_state) +_dfreport_owner::~_dfreport_owner() { - ae_int_t i; - - - for(i=ties->ptr.p_int[ntie]; i<=ties->ptr.p_int[ntie+1]-1; i++) + if( p_struct!=NULL ) { - cnt->ptr.p_int[c->ptr.p_int[i]] = cnt->ptr.p_int[c->ptr.p_int[i]]-1; + alglib_impl::_dfreport_destroy(p_struct); + ae_free(p_struct); } } - -void _cvreport_init(void* _p, ae_state *_state) +alglib_impl::dfreport* _dfreport_owner::c_ptr() { - cvreport *p = (cvreport*)_p; - ae_touch_ptr((void*)p); + return p_struct; } - -void _cvreport_init_copy(void* _dst, void* _src, ae_state *_state) +alglib_impl::dfreport* _dfreport_owner::c_ptr() const +{ + return const_cast(p_struct); +} +dfreport::dfreport() : _dfreport_owner() ,relclserror(p_struct->relclserror),avgce(p_struct->avgce),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror),oobrelclserror(p_struct->oobrelclserror),oobavgce(p_struct->oobavgce),oobrmserror(p_struct->oobrmserror),oobavgerror(p_struct->oobavgerror),oobavgrelerror(p_struct->oobavgrelerror),topvars(&p_struct->topvars),varimportances(&p_struct->varimportances) { - cvreport *dst = (cvreport*)_dst; - cvreport *src = (cvreport*)_src; - dst->relclserror = src->relclserror; - dst->avgce = src->avgce; - dst->rmserror = src->rmserror; - dst->avgerror = src->avgerror; - dst->avgrelerror = src->avgrelerror; } - -void _cvreport_clear(void* _p) +dfreport::dfreport(const dfreport &rhs):_dfreport_owner(rhs) ,relclserror(p_struct->relclserror),avgce(p_struct->avgce),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror),oobrelclserror(p_struct->oobrelclserror),oobavgce(p_struct->oobavgce),oobrmserror(p_struct->oobrmserror),oobavgerror(p_struct->oobavgerror),oobavgrelerror(p_struct->oobavgrelerror),topvars(&p_struct->topvars),varimportances(&p_struct->varimportances) { - cvreport *p = (cvreport*)_p; - ae_touch_ptr((void*)p); } - -void _cvreport_destroy(void* _p) +dfreport& dfreport::operator=(const dfreport &rhs) { - cvreport *p = (cvreport*)_p; - ae_touch_ptr((void*)p); + if( this==&rhs ) + return *this; + _dfreport_owner::operator=(rhs); + return *this; } - +dfreport::~dfreport() +{ +} /************************************************************************* -This function initializes clusterizer object. Newly initialized object is -empty, i.e. it does not contain dataset. You should use it as follows: -1. creation -2. dataset is added with ClusterizerSetPoints() -3. additional parameters are set -3. clusterization is performed with one of the clustering functions +This function serializes data structure to string. - -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. 
+ But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. *************************************************************************/ -void clusterizercreate(clusterizerstate* s, ae_state *_state) +void dfserialize(decisionforest &obj, std::string &s_out) { + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + alglib_impl::ae_int_t ssize; - _clusterizerstate_clear(s); + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_alloc_start(&serializer); + alglib_impl::dfalloc(&serializer, obj.c_ptr(), &state); + ssize = alglib_impl::ae_serializer_get_alloc_size(&serializer); + s_out.clear(); + s_out.reserve((size_t)(ssize+1)); + alglib_impl::ae_serializer_sstart_str(&serializer, &s_out); + alglib_impl::dfserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_assert( s_out.length()<=(size_t)ssize, "ALGLIB: serialization integrity error", &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); +} +/************************************************************************* +This function unserializes data structure from string. +*************************************************************************/ +void dfunserialize(const std::string &s_in, decisionforest &obj) +{ + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; - s->npoints = 0; - s->nfeatures = 0; - s->disttype = 2; - s->ahcalgo = 0; - s->kmeansrestarts = 1; - s->kmeansmaxits = 0; - s->kmeansinitalgo = 0; - s->kmeansdbgnoits = ae_false; - kmeansinitbuf(&s->kmeanstmp, _state); + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_ustart_str(&serializer, &s_in); + alglib_impl::dfunserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } /************************************************************************* -This function adds dataset to the clusterizer structure. - -This function overrides all previous calls of ClusterizerSetPoints() or -ClusterizerSetDistances(). +This function serializes data structure to C++ stream. 
-INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - XY - array[NPoints,NFeatures], dataset - NPoints - number of points, >=0 - NFeatures- number of features, >=1 - DistType- distance function: - * 0 Chebyshev distance (L-inf norm) - * 1 city block distance (L1 norm) - * 2 Euclidean distance (L2 norm), non-squared - * 10 Pearson correlation: - dist(a,b) = 1-corr(a,b) - * 11 Absolute Pearson correlation: - dist(a,b) = 1-|corr(a,b)| - * 12 Uncentered Pearson correlation (cosine of the angle): - dist(a,b) = a'*b/(|a|*|b|) - * 13 Absolute uncentered Pearson correlation - dist(a,b) = |a'*b|/(|a|*|b|) - * 20 Spearman rank correlation: - dist(a,b) = 1-rankcorr(a,b) - * 21 Absolute Spearman rank correlation - dist(a,b) = 1-|rankcorr(a,b)| +Data stream generated by this function is same as string representation +generated by string version of serializer - alphanumeric characters, +dots, underscores, minus signs, which are grouped into words separated by +spaces and CR+LF. -NOTE 1: different distance functions have different performance penalty: - * Euclidean or Pearson correlation distances are the fastest ones - * Spearman correlation distance function is a bit slower - * city block and Chebyshev distances are order of magnitude slower - - The reason behing difference in performance is that correlation-based - distance functions are computed using optimized linear algebra kernels, - while Chebyshev and city block distance functions are computed using - simple nested loops with two branches at each iteration. - -NOTE 2: different clustering algorithms have different limitations: - * agglomerative hierarchical clustering algorithms may be used with - any kind of distance metric - * k-means++ clustering algorithm may be used only with Euclidean - distance function - Thus, list of specific clustering algorithms you may use depends - on distance function you specify when you set your dataset. - - -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey +We recommend you to read comments on string version of serializer to find +out more about serialization of AlGLIB objects. 
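
The string serializer above makes model persistence a small helper pair. Signatures are taken from the wrappers in this diff; 'df' is assumed to be a trained model (serializing an empty, default-constructed one is not meaningful).

    #include <string>
    #include "dataanalysis.h"

    using namespace alglib;

    // Round-trip a trained model through the string serializer shown above.
    // The blob is portable across platforms, word sizes and even the
    // C++/C# editions of ALGLIB.
    std::string save_model(decisionforest &df)     // dfserialize() takes a non-const reference
    {
        std::string blob;
        dfserialize(df, blob);
        return blob;
    }

    decisionforest load_model(const std::string &blob)
    {
        decisionforest df;
        dfunserialize(blob, df);
        return df;
    }
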
*************************************************************************/ -void clusterizersetpoints(clusterizerstate* s, - /* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nfeatures, - ae_int_t disttype, - ae_state *_state) +void dfserialize(decisionforest &obj, std::ostream &s_out) { - ae_int_t i; - + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; - ae_assert((((((((disttype==0||disttype==1)||disttype==2)||disttype==10)||disttype==11)||disttype==12)||disttype==13)||disttype==20)||disttype==21, "ClusterizerSetPoints: incorrect DistType", _state); - ae_assert(npoints>=0, "ClusterizerSetPoints: NPoints<0", _state); - ae_assert(nfeatures>=1, "ClusterizerSetPoints: NFeatures<1", _state); - ae_assert(xy->rows>=npoints, "ClusterizerSetPoints: Rows(XY)cols>=nfeatures, "ClusterizerSetPoints: Cols(XY)npoints = npoints; - s->nfeatures = nfeatures; - s->disttype = disttype; - rmatrixsetlengthatleast(&s->xy, npoints, nfeatures, _state); - for(i=0; i<=npoints-1; i++) + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - ae_v_move(&s->xy.ptr.pp_double[i][0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,nfeatures-1)); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_alloc_start(&serializer); + alglib_impl::dfalloc(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_get_alloc_size(&serializer); // not actually needed, but we have to ask + alglib_impl::ae_serializer_sstart_stream(&serializer, &s_out); + alglib_impl::dfserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); } +/************************************************************************* +This function unserializes data structure from stream. +*************************************************************************/ +void dfunserialize(const std::istream &s_in, decisionforest &obj) +{ + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_ustart_stream(&serializer, &s_in); + alglib_impl::dfunserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); +} /************************************************************************* -This function adds dataset given by distance matrix to the clusterizer -structure. It is important that dataset is not given explicitly - only -distance matrix is given. +This function creates buffer structure which can be used to perform +parallel inference requests. -This function overrides all previous calls of ClusterizerSetPoints() or -ClusterizerSetDistances(). 
+DF subpackage provides two sets of computing functions - ones which use +internal buffer of DF model (these functions are single-threaded because +they use same buffer, which can not shared between threads), and ones +which use external buffer. + +This function is used to initialize external buffer. + +INPUT PARAMETERS + Model - DF model which is associated with newly created buffer + +OUTPUT PARAMETERS + Buf - external buffer. -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - D - array[NPoints,NPoints], distance matrix given by its upper - or lower triangle (main diagonal is ignored because its - entries are expected to be zero). - NPoints - number of points - IsUpper - whether upper or lower triangle of D is given. - -NOTE 1: different clustering algorithms have different limitations: - * agglomerative hierarchical clustering algorithms may be used with - any kind of distance metric, including one which is given by - distance matrix - * k-means++ clustering algorithm may be used only with Euclidean - distance function and explicitly given points - it can not be - used with dataset given by distance matrix - Thus, if you call this function, you will be unable to use k-means - clustering algorithm to process your problem. + +IMPORTANT: buffer object should be used only with model which was used to + initialize buffer. Any attempt to use buffer with different + object is dangerous - you may get integrity check failure + (exception) because sizes of internal arrays do not fit to + dimensions of the model structure. -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -void clusterizersetdistances(clusterizerstate* s, - /* Real */ ae_matrix* d, - ae_int_t npoints, - ae_bool isupper, - ae_state *_state) +void dfcreatebuffer(const decisionforest &model, decisionforestbuffer &buf, const xparams _xparams) { - ae_int_t i; - ae_int_t j; - ae_int_t j0; - ae_int_t j1; - - - ae_assert(npoints>=0, "ClusterizerSetDistances: NPoints<0", _state); - ae_assert(d->rows>=npoints, "ClusterizerSetDistances: Rows(D)cols>=npoints, "ClusterizerSetDistances: Cols(D)npoints = npoints; - s->nfeatures = 0; - s->disttype = -1; - rmatrixsetlengthatleast(&s->d, npoints, npoints, _state); - for(i=0; i<=npoints-1; i++) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - if( isupper ) - { - j0 = i+1; - j1 = npoints-1; - } - else - { - j0 = 0; - j1 = i-1; - } - for(j=j0; j<=j1; j++) - { - ae_assert(ae_isfinite(d->ptr.pp_double[i][j], _state)&&ae_fp_greater_eq(d->ptr.pp_double[i][j],(double)(0)), "ClusterizerSetDistances: D contains infinite, NAN or negative elements", _state); - s->d.ptr.pp_double[i][j] = d->ptr.pp_double[i][j]; - s->d.ptr.pp_double[j][i] = d->ptr.pp_double[i][j]; - } - s->d.ptr.pp_double[i][i] = (double)(0); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfcreatebuffer(const_cast(model.c_ptr()), const_cast(buf.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This 
function sets agglomerative hierarchical clustering algorithm +This subroutine creates DecisionForestBuilder object which is used to +train decision forests. + +By default, new builder stores empty dataset and some reasonable default +settings. At the very least, you should specify dataset prior to building +decision forest. You can also tweak settings of the forest construction +algorithm (recommended, although default setting should work well). + +Following actions are mandatory: +* calling dfbuildersetdataset() to specify dataset +* calling dfbuilderbuildrandomforest() to build decision forest using + current dataset and default settings + +Additionally, you may call: +* dfbuildersetrndvars() or dfbuildersetrndvarsratio() to specify number of + variables randomly chosen for each split +* dfbuildersetsubsampleratio() to specify fraction of the dataset randomly + subsampled to build each tree +* dfbuildersetseed() to control random seed chosen for tree construction INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - Algo - algorithm type: - * 0 complete linkage (default algorithm) - * 1 single linkage - * 2 unweighted average linkage - * 3 weighted average linkage - * 4 Ward's method + none -NOTE: Ward's method works correctly only with Euclidean distance, that's - why algorithm will return negative termination code (failure) for - any other distance type. - - It is possible, however, to use this method with user-supplied - distance matrix. It is your responsibility to pass one which was - calculated with Euclidean distance function. +OUTPUT PARAMETERS: + S - decision forest builder -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -void clusterizersetahcalgo(clusterizerstate* s, - ae_int_t algo, - ae_state *_state) +void dfbuildercreate(decisionforestbuilder &s, const xparams _xparams) { - - - ae_assert((((algo==0||algo==1)||algo==2)||algo==3)||algo==4, "ClusterizerSetHCAlgo: incorrect algorithm type", _state); - s->ahcalgo = algo; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildercreate(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This function sets k-means properties: number of restarts and maximum -number of iterations per one run. +This subroutine adds dense dataset to the internal storage of the builder +object. Specifying your dataset in the dense format means that the dense +version of the forest construction algorithm will be invoked. 
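The mandatory workflow listed above (create the builder, attach a dataset, build the forest) can be sketched as follows. The toy dataset, its dimensions and the tree count are illustrative assumptions; the trailing xparams argument of each wrapper is left at its (assumed) header default.

#include "dataanalysis.h"   // ALGLIB header; the install path may differ (assumption)

int main()
{
    // Two variables, two classes; the last column stores the class number.
    alglib::real_2d_array xy = "[[0,0,0],[0,1,0],[1,0,1],[1,1,1]]";

    alglib::decisionforestbuilder builder;
    alglib::dfbuildercreate(builder);
    alglib::dfbuildersetdataset(builder, xy, 4, 2, 2);   // NPoints=4, NVars=2, NClasses=2

    // Build a small forest with default settings.
    alglib::decisionforest df;
    alglib::dfreport rep;
    alglib::dfbuilderbuildrandomforest(builder, 50, df, rep);
    return 0;
}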
+ +INPUT PARAMETERS: + S - decision forest builder object + XY - array[NPoints,NVars+1] (minimum size; actual size can + be larger, only leading part is used anyway), dataset: + * first NVars elements of each row store values of the + independent variables + * last column store class number (in 0...NClasses-1) + or real value of the dependent variable + NPoints - number of rows in the dataset, NPoints>=1 + NVars - number of independent variables, NVars>=1 + NClasses - indicates type of the problem being solved: + * NClasses>=2 means that classification problem is + solved (last column of the dataset stores class + number) + * NClasses=1 means that regression problem is solved + (last column of the dataset stores variable value) -INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - Restarts- restarts count, >=1. - k-means++ algorithm performs several restarts and chooses - best set of centers (one with minimum squared distance). - MaxIts - maximum number of k-means iterations performed during one - run. >=0, zero value means that algorithm performs unlimited - number of iterations. +OUTPUT PARAMETERS: + S - decision forest builder -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -void clusterizersetkmeanslimits(clusterizerstate* s, - ae_int_t restarts, - ae_int_t maxits, - ae_state *_state) +void dfbuildersetdataset(const decisionforestbuilder &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const xparams _xparams) { - - - ae_assert(restarts>=1, "ClusterizerSetKMeansLimits: Restarts<=0", _state); - ae_assert(maxits>=0, "ClusterizerSetKMeansLimits: MaxIts<0", _state); - s->kmeansrestarts = restarts; - s->kmeansmaxits = maxits; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetdataset(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, nvars, nclasses, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This function sets k-means initialization algorithm. Several different -algorithms can be chosen, including k-means++. +This function sets number of variables (in [1,NVars] range) used by +decision forest construction algorithm. + +The default option is to use roughly sqrt(NVars) variables. INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - InitAlgo- initialization algorithm: - * 0 automatic selection ( different versions of ALGLIB - may select different algorithms) - * 1 random initialization - * 2 k-means++ initialization (best quality of initial - centers, but long non-parallelizable initialization - phase with bad cache locality) - * 3 "fast-greedy" algorithm with efficient, easy to - parallelize initialization. Quality of initial centers - is somewhat worse than that of k-means++. This - algorithm is a default one in the current version of - ALGLIB. 
- *-1 "debug" algorithm which always selects first K rows - of dataset; this algorithm is used for debug purposes - only. Do not use it in the industrial code! + S - decision forest builder object + RndVars - number of randomly selected variables; values outside + of [1,NVars] range are silently clipped. + +OUTPUT PARAMETERS: + S - decision forest builder -- ALGLIB -- - Copyright 21.01.2015 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -void clusterizersetkmeansinit(clusterizerstate* s, - ae_int_t initalgo, - ae_state *_state) +void dfbuildersetrndvars(const decisionforestbuilder &s, const ae_int_t rndvars, const xparams _xparams) { - - - ae_assert(initalgo>=-1&&initalgo<=3, "ClusterizerSetKMeansInit: InitAlgo is incorrect", _state); - s->kmeansinitalgo = initalgo; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetrndvars(const_cast(s.c_ptr()), rndvars, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This function performs agglomerative hierarchical clustering +This function sets number of variables used by decision forest construction +algorithm as a fraction of total variable count (0,1) range. -COMMERCIAL EDITION OF ALGLIB: - - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Agglomerative hierarchical clustering algorithm has two phases: - ! distance matrix calculation and clustering itself. Only first phase - ! (distance matrix calculation) is accelerated by Intel MKL and multi- - ! threading. Thus, acceleration is significant only for medium or high- - ! dimensional problems. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. +The default option is to use roughly sqrt(NVars) variables. INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() + S - decision forest builder object + F - round(NVars*F) variables are selected OUTPUT PARAMETERS: - Rep - clustering results; see description of AHCReport - structure for more information. - -NOTE 1: hierarchical clustering algorithms require large amounts of memory. - In particular, this implementation needs sizeof(double)*NPoints^2 - bytes, which are used to store distance matrix. In case we work - with user-supplied matrix, this amount is multiplied by 2 (we have - to store original matrix and to work with its copy). - - For example, problem with 10000 points would require 800M of RAM, - even when working in a 1-dimensional space. 
+ S - decision forest builder -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -void clusterizerrunahc(clusterizerstate* s, - ahcreport* rep, - ae_state *_state) +void dfbuildersetrndvarsratio(const decisionforestbuilder &s, const double f, const xparams _xparams) { - ae_int_t npoints; - ae_int_t nfeatures; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetrndvarsratio(const_cast(s.c_ptr()), f, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} - _ahcreport_clear(rep); +/************************************************************************* +This function tells decision forest builder to automatically choose number +of variables used by decision forest construction algorithm. Roughly +sqrt(NVars) variables will be used. - npoints = s->npoints; - nfeatures = s->nfeatures; - - /* - * Fill Rep.NPoints, quick exit when NPoints<=1 - */ - rep->npoints = npoints; - if( npoints==0 ) +INPUT PARAMETERS: + S - decision forest builder object + +OUTPUT PARAMETERS: + S - decision forest builder + + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +void dfbuildersetrndvarsauto(const decisionforestbuilder &s, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - ae_vector_set_length(&rep->p, 0, _state); - ae_matrix_set_length(&rep->z, 0, 0, _state); - ae_matrix_set_length(&rep->pz, 0, 0, _state); - ae_matrix_set_length(&rep->pm, 0, 0, _state); - ae_vector_set_length(&rep->mergedist, 0, _state); - rep->terminationtype = 1; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - if( npoints==1 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetrndvarsauto(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function sets size of dataset subsample generated the decision forest +construction algorithm. Size is specified as a fraction of total dataset +size. + +The default option is to use 50% of the dataset for training, 50% for the +OOB estimates. You can decrease fraction F down to 10%, 1% or even below +in order to reduce overfitting. + +INPUT PARAMETERS: + S - decision forest builder object + F - fraction of the dataset to use, in (0,1] range. Values + outside of this range will be silently clipped. At + least one element is always selected for the training + set. 
+ +OUTPUT PARAMETERS: + S - decision forest builder + + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +void dfbuildersetsubsampleratio(const decisionforestbuilder &s, const double f, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - ae_vector_set_length(&rep->p, 1, _state); - ae_matrix_set_length(&rep->z, 0, 0, _state); - ae_matrix_set_length(&rep->pz, 0, 0, _state); - ae_matrix_set_length(&rep->pm, 0, 0, _state); - ae_vector_set_length(&rep->mergedist, 0, _state); - rep->p.ptr.p_int[0] = 0; - rep->terminationtype = 1; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * More than one point - */ - if( s->disttype==-1 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetsubsampleratio(const_cast(s.c_ptr()), f, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function sets seed used by internal RNG for random subsampling and +random selection of variable subsets. + +By default random seed is used, i.e. every time you build decision forest, +we seed generator with new value obtained from system-wide RNG. Thus, +decision forest builder returns non-deterministic results. You can change +such behavior by specyfing fixed positive seed value. + +INPUT PARAMETERS: + S - decision forest builder object + SeedVal - seed value: + * positive values are used for seeding RNG with fixed + seed, i.e. subsequent runs on same data will return + same decision forests + * non-positive seed means that random seed is used + for every run of builder, i.e. subsequent runs on + same datasets will return slightly different + decision forests + +OUTPUT PARAMETERS: + S - decision forest builder, see + + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +void dfbuildersetseed(const decisionforestbuilder &s, const ae_int_t seedval, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Run clusterizer with user-supplied distance matrix - */ - clustering_clusterizerrunahcinternal(s, &s->d, rep, _state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - else + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetseed(const_cast(s.c_ptr()), seedval, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function sets random decision forest construction algorithm. + +As for now, only one decision forest construction algorithm is supported - +a dense "baseline" RDF algorithm. 
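Taken together, the tuning functions documented above give reproducible, less overfitting-prone builds. A short sketch, assuming a decisionforestbuilder object named builder that was already created with dfbuildercreate(); the particular values are illustrative.

#include "dataanalysis.h"   // ALGLIB header; the install path may differ (assumption)

void configure_builder(alglib::decisionforestbuilder &builder)
{
    // Fixed positive seed: repeated runs on the same data return the same forest.
    alglib::dfbuildersetseed(builder, 1234);

    // Train each tree on 25% of the dataset; the rest feeds the OOB estimates.
    alglib::dfbuildersetsubsampleratio(builder, 0.25);

    // Consider 3 randomly chosen variables at each split.
    alglib::dfbuildersetrndvars(builder, 3);
}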
+ +INPUT PARAMETERS: + S - decision forest builder object + AlgoType - algorithm type: + * 0 = baseline dense RDF + +OUTPUT PARAMETERS: + S - decision forest builder, see + + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +void dfbuildersetrdfalgo(const decisionforestbuilder &s, const ae_int_t algotype, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Check combination of AHC algo and distance type - */ - if( s->ahcalgo==4&&s->disttype!=2 ) - { - rep->terminationtype = -5; - return; - } - - /* - * Build distance matrix D. - */ - clusterizergetdistancesbuf(&s->distbuf, &s->xy, npoints, nfeatures, s->disttype, &s->tmpd, _state); - - /* - * Run clusterizer - */ - clustering_clusterizerrunahcinternal(s, &s->tmpd, rep, _state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetrdfalgo(const_cast(s.c_ptr()), algotype, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -Single-threaded stub. HPC ALGLIB replaces it by multithreaded code. +This function sets split selection algorithm used by decision forest +classifier. You may choose several algorithms, with different speed and +quality of the results. + +INPUT PARAMETERS: + S - decision forest builder object + SplitStrength- split type: + * 0 = split at the random position, fastest one + * 1 = split at the middle of the range + * 2 = strong split at the best point of the range (default) + +OUTPUT PARAMETERS: + S - decision forest builder, see + + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -void _pexec_clusterizerrunahc(clusterizerstate* s, - ahcreport* rep, ae_state *_state) +void dfbuildersetrdfsplitstrength(const decisionforestbuilder &s, const ae_int_t splitstrength, const xparams _xparams) { - clusterizerrunahc(s,rep, _state); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetrdfsplitstrength(const_cast(s.c_ptr()), splitstrength, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This function performs clustering by k-means++ algorithm. +This function tells decision forest construction algorithm to use +Gini impurity based variable importance estimation (also known as MDI). 
-You may change algorithm properties by calling: -* ClusterizerSetKMeansLimits() to change number of restarts or iterations -* ClusterizerSetKMeansInit() to change initialization algorithm +This version of importance estimation algorithm analyzes mean decrease in +impurity (MDI) on training sample during splits. The result is divided +by impurity at the root node in order to produce estimate in [0,1] range. -By default, one restart and unlimited number of iterations are used. -Initialization algorithm is chosen automatically. +Such estimates are fast to calculate and beautifully normalized (sum to +one) but have following downsides: +* They ALWAYS sum to 1.0, even if output is completely unpredictable. I.e. + MDI allows to order variables by importance, but does not tell us about + "absolute" importances of variables +* there exist some bias towards continuous and high-cardinality categorical + variables -COMMERCIAL EDITION OF ALGLIB: +NOTE: informally speaking, MDA (permutation importance) rating answers the + question "what part of the model predictive power is ruined by + permuting k-th variable?" while MDI tells us "what part of the model + predictive power was achieved due to usage of k-th variable". - ! Commercial version of ALGLIB includes two important improvements of - ! this function: - ! * multicore support (can be used from C# and C++) - ! * access to high-performance C++ core (actual for C# users) - ! - ! K-means clustering algorithm has two phases: selection of initial - ! centers and clustering itself. ALGLIB parallelizes both phases. - ! Parallel version is optimized for the following scenario: medium or - ! high-dimensional problem (20 or more dimensions) with large number of - ! points and clusters. However, some speed-up can be obtained even when - ! assumptions above are violated. - ! - ! As for native-vs-managed comparison, working with native core brings - ! 30-40% improvement in speed over pure C# version of ALGLIB. - ! - ! We recommend you to read 'Working with commercial version' section of - ! ALGLIB Reference Manual in order to find out how to use performance- - ! related features provided by commercial edition of ALGLIB. + Thus, MDA rates each variable independently at "0 to 1" scale while + MDI (and OOB-MDI too) tends to divide "unit amount of importance" + between several important variables. + + If all variables are equally important, they will have same + MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to 1/NVars. + However, roughly same picture will be produced for the "all + variables provide information no one is critical" situation and for + the "all variables are critical, drop any one, everything is ruined" + situation. + + Contrary to that, MDA will rate critical variable as ~1.0 important, + and important but non-critical variable will have less than unit + rating. + +NOTE: quite an often MDA and MDI return same results. It generally happens + on problems with low test set error (a few percents at most) and + large enough training set to avoid overfitting. + + The difference between MDA, MDI and OOB-MDI becomes important only + on "hard" tasks with high test set error and/or small training set. INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - K - number of clusters, K>=0. - K can be zero only when algorithm is called for empty - dataset, in this case completion code is set to - success (+1). 
- If K=0 and dataset size is non-zero, we can not - meaningfully assign points to some center (there are no - centers because K=0) and return -3 as completion code - (failure). + S - decision forest builder object OUTPUT PARAMETERS: - Rep - clustering results; see description of KMeansReport - structure for more information. - -NOTE 1: k-means clustering can be performed only for datasets with - Euclidean distance function. Algorithm will return negative - completion code in Rep.TerminationType in case dataset was added - to clusterizer with DistType other than Euclidean (or dataset was - specified by distance matrix instead of explicitly given points). + S - decision forest builder object. Next call to the forest + construction function will produce: + * importance estimates in rep.varimportances field + * variable ranks in rep.topvars field -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 29.07.2019 by Bochkanov Sergey *************************************************************************/ -void clusterizerrunkmeans(clusterizerstate* s, - ae_int_t k, - kmeansreport* rep, - ae_state *_state) +void dfbuildersetimportancetrngini(const decisionforestbuilder &s, const xparams _xparams) { - ae_frame _frame_block; - ae_matrix dummy; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetimportancetrngini(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} - ae_frame_make(_state, &_frame_block); - _kmeansreport_clear(rep); - ae_matrix_init(&dummy, 0, 0, DT_REAL, _state); +/************************************************************************* +This function tells decision forest construction algorithm to use +out-of-bag version of Gini variable importance estimation (also known as +OOB-MDI). - ae_assert(k>=0, "ClusterizerRunKMeans: K<0", _state); - - /* - * Incorrect distance type - */ - if( s->disttype!=2 ) +This version of importance estimation algorithm analyzes mean decrease in +impurity (MDI) on out-of-bag sample during splits. The result is divided +by impurity at the root node in order to produce estimate in [0,1] range. + +Such estimates are fast to calculate and resistant to overfitting issues +(thanks to the out-of-bag estimates used). However, OOB Gini rating has +following downsides: +* there exist some bias towards continuous and high-cardinality categorical + variables +* Gini rating allows us to order variables by importance, but it is hard + to define importance of the variable by itself. + +NOTE: informally speaking, MDA (permutation importance) rating answers the + question "what part of the model predictive power is ruined by + permuting k-th variable?" while MDI tells us "what part of the model + predictive power was achieved due to usage of k-th variable". + + Thus, MDA rates each variable independently at "0 to 1" scale while + MDI (and OOB-MDI too) tends to divide "unit amount of importance" + between several important variables. + + If all variables are equally important, they will have same + MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to 1/NVars. 
+ However, roughly same picture will be produced for the "all + variables provide information no one is critical" situation and for + the "all variables are critical, drop any one, everything is ruined" + situation. + + Contrary to that, MDA will rate critical variable as ~1.0 important, + and important but non-critical variable will have less than unit + rating. + +NOTE: quite an often MDA and MDI return same results. It generally happens + on problems with low test set error (a few percents at most) and + large enough training set to avoid overfitting. + + The difference between MDA, MDI and OOB-MDI becomes important only + on "hard" tasks with high test set error and/or small training set. + +INPUT PARAMETERS: + S - decision forest builder object + +OUTPUT PARAMETERS: + S - decision forest builder object. Next call to the forest + construction function will produce: + * importance estimates in rep.varimportances field + * variable ranks in rep.topvars field + + -- ALGLIB -- + Copyright 29.07.2019 by Bochkanov Sergey +*************************************************************************/ +void dfbuildersetimportanceoobgini(const decisionforestbuilder &s, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - rep->npoints = s->npoints; - rep->terminationtype = -5; - rep->k = k; - rep->iterationscount = 0; - rep->energy = 0.0; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * K>NPoints or (K=0 and NPoints>0) - */ - if( k>s->npoints||(k==0&&s->npoints>0) ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetimportanceoobgini(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function tells decision forest construction algorithm to use +permutation variable importance estimator (also known as MDA). + +This version of importance estimation algorithm analyzes mean increase in +out-of-bag sum of squared residuals after random permutation of J-th +variable. The result is divided by error computed with all variables being +perturbed in order to produce R-squared-like estimate in [0,1] range. + +Such estimate is slower to calculate than Gini-based rating because it +needs multiple inference runs for each of variables being studied. + +ALGLIB uses parallelized and highly optimized algorithm which analyzes +path through the decision tree and allows to handle most perturbations +in O(1) time; nevertheless, requesting MDA importances may increase forest +construction time from 10% to 200% (or more, if you have thousands of +variables). + +However, MDA rating has following benefits over Gini-based ones: +* no bias towards specific variable types +* ability to directly evaluate "absolute" importance of some variable at + "0 to 1" scale (contrary to Gini-based rating, which returns comparative + importances). + +NOTE: informally speaking, MDA (permutation importance) rating answers the + question "what part of the model predictive power is ruined by + permuting k-th variable?" 
while MDI tells us "what part of the model + predictive power was achieved due to usage of k-th variable". + + Thus, MDA rates each variable independently at "0 to 1" scale while + MDI (and OOB-MDI too) tends to divide "unit amount of importance" + between several important variables. + + If all variables are equally important, they will have same + MDI/OOB-MDI rating, equal (for OOB-MDI: roughly equal) to 1/NVars. + However, roughly same picture will be produced for the "all + variables provide information no one is critical" situation and for + the "all variables are critical, drop any one, everything is ruined" + situation. + + Contrary to that, MDA will rate critical variable as ~1.0 important, + and important but non-critical variable will have less than unit + rating. + +NOTE: quite an often MDA and MDI return same results. It generally happens + on problems with low test set error (a few percents at most) and + large enough training set to avoid overfitting. + + The difference between MDA, MDI and OOB-MDI becomes important only + on "hard" tasks with high test set error and/or small training set. + +INPUT PARAMETERS: + S - decision forest builder object + +OUTPUT PARAMETERS: + S - decision forest builder object. Next call to the forest + construction function will produce: + * importance estimates in rep.varimportances field + * variable ranks in rep.topvars field + + -- ALGLIB -- + Copyright 29.07.2019 by Bochkanov Sergey +*************************************************************************/ +void dfbuildersetimportancepermutation(const decisionforestbuilder &s, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - rep->npoints = s->npoints; - rep->terminationtype = -3; - rep->k = k; - rep->iterationscount = 0; - rep->energy = 0.0; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * No points - */ - if( s->npoints==0 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetimportancepermutation(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function tells decision forest construction algorithm to skip +variable importance estimation. + +INPUT PARAMETERS: + S - decision forest builder object + +OUTPUT PARAMETERS: + S - decision forest builder object. Next call to the forest + construction function will result in forest being built + without variable importance estimation. 
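Selecting one of the estimators above is a single call made before the forest is built; the ratings then appear in the report fields named in this section. A sketch assuming a prepared builder; the rep.varimportances / rep.topvars field names come from the documentation above, while their exact C++ array types and the reporting loop are assumptions.

#include <cstdio>
#include "dataanalysis.h"   // ALGLIB header; the install path may differ (assumption)

void importance_example(alglib::decisionforestbuilder &builder)
{
    // Request permutation (MDA) importances before building.
    alglib::dfbuildersetimportancepermutation(builder);

    alglib::decisionforest df;
    alglib::dfreport rep;
    alglib::dfbuilderbuildrandomforest(builder, 100, df, rep);

    // topvars orders variable indexes from most to least important,
    // varimportances holds the corresponding [0,1] ratings.
    for(alglib::ae_int_t i = 0; i < rep.varimportances.length(); i++)
    {
        alglib::ae_int_t v = rep.topvars(i);
        std::printf("var %ld: %.3f\n", (long)v, (double)rep.varimportances(v));
    }
}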
+ + -- ALGLIB -- + Copyright 29.07.2019 by Bochkanov Sergey +*************************************************************************/ +void dfbuildersetimportancenone(const decisionforestbuilder &s, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - rep->npoints = 0; - rep->terminationtype = 1; - rep->k = k; - rep->iterationscount = 0; - rep->energy = 0.0; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * Normal case: - * 1<=K<=NPoints, Euclidean distance - */ - rep->npoints = s->npoints; - rep->nfeatures = s->nfeatures; - rep->k = k; - rep->npoints = s->npoints; - rep->nfeatures = s->nfeatures; - kmeansgenerateinternal(&s->xy, s->npoints, s->nfeatures, k, s->kmeansinitalgo, s->kmeansmaxits, s->kmeansrestarts, s->kmeansdbgnoits, &rep->terminationtype, &rep->iterationscount, &dummy, ae_false, &rep->c, ae_true, &rep->cidx, &rep->energy, &s->kmeanstmp, _state); - ae_frame_leave(_state); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildersetimportancenone(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -Single-threaded stub. HPC ALGLIB replaces it by multithreaded code. +This function is an alias for dfbuilderpeekprogress(), left in ALGLIB for +backward compatibility reasons. + + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -void _pexec_clusterizerrunkmeans(clusterizerstate* s, - ae_int_t k, - kmeansreport* rep, ae_state *_state) +double dfbuildergetprogress(const decisionforestbuilder &s, const xparams _xparams) { - clusterizerrunkmeans(s,k,rep, _state); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::dfbuildergetprogress(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } - /************************************************************************* -This function returns distance matrix for dataset +This function is used to peek into decision forest construction process +from some other thread and get current progress indicator. -COMMERCIAL EDITION OF ALGLIB: +It returns value in [0,1]. - ! Commercial version of ALGLIB includes two important improvements of - ! this function, which can be used from C++ and C#: - ! * Intel MKL support (lightweight Intel MKL is shipped with ALGLIB) - ! * multicore support - ! - ! Agglomerative hierarchical clustering algorithm has two phases: - ! distance matrix calculation and clustering itself. Only first phase - ! (distance matrix calculation) is accelerated by Intel MKL and multi- - ! threading. 
Thus, acceleration is significant only for medium or high- - ! dimensional problems. +INPUT PARAMETERS: + S - decision forest builder object used to build forest + in some other thread + +RESULT: + progress value, in [0,1] + + -- ALGLIB -- + Copyright 21.05.2018 by Bochkanov Sergey +*************************************************************************/ +double dfbuilderpeekprogress(const decisionforestbuilder &s, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::dfbuilderpeekprogress(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +This subroutine builds decision forest according to current settings using +dataset internally stored in the builder object. Dense algorithm is used. + +NOTE: this function uses dense algorithm for forest construction + independently from the dataset format (dense or sparse). + +NOTE: forest built with this function is stored in-memory using 64-bit + data structures for offsets/indexes/split values. It is possible to + convert forest into more memory-efficient compressed binary + representation. Depending on the problem properties, 3.7x-5.7x + compression factors are possible. + + The downsides of compression are (a) slight reduction in the model + accuracy and (b) ~1.5x reduction in the inference speed (due to + increased complexity of the storage format). + + See comments on dfbinarycompression() for more info. + +Default settings are used by the algorithm; you can tweak them with the +help of the following functions: +* dfbuildersetrfactor() - to control a fraction of the dataset used for + subsampling +* dfbuildersetrandomvars() - to control number of variables randomly chosen + for decision rule creation + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) ! ! We recommend you to read 'Working with commercial version' section of ! ALGLIB Reference Manual in order to find out how to use performance- ! related features provided by commercial edition of ALGLIB. 
INPUT PARAMETERS: - XY - array[NPoints,NFeatures], dataset - NPoints - number of points, >=0 - NFeatures- number of features, >=1 - DistType- distance function: - * 0 Chebyshev distance (L-inf norm) - * 1 city block distance (L1 norm) - * 2 Euclidean distance (L2 norm, non-squared) - * 10 Pearson correlation: - dist(a,b) = 1-corr(a,b) - * 11 Absolute Pearson correlation: - dist(a,b) = 1-|corr(a,b)| - * 12 Uncentered Pearson correlation (cosine of the angle): - dist(a,b) = a'*b/(|a|*|b|) - * 13 Absolute uncentered Pearson correlation - dist(a,b) = |a'*b|/(|a|*|b|) - * 20 Spearman rank correlation: - dist(a,b) = 1-rankcorr(a,b) - * 21 Absolute Spearman rank correlation - dist(a,b) = 1-|rankcorr(a,b)| + S - decision forest builder object + NTrees - NTrees>=1, number of trees to train OUTPUT PARAMETERS: - D - array[NPoints,NPoints], distance matrix - (full matrix is returned, with lower and upper triangles) + DF - decision forest. You can compress this forest to more + compact 16-bit representation with dfbinarycompression() + Rep - report, see below for information on its fields. -NOTE: different distance functions have different performance penalty: - * Euclidean or Pearson correlation distances are the fastest ones - * Spearman correlation distance function is a bit slower - * city block and Chebyshev distances are order of magnitude slower - - The reason behing difference in performance is that correlation-based - distance functions are computed using optimized linear algebra kernels, - while Chebyshev and city block distance functions are computed using - simple nested loops with two branches at each iteration. +=== report information produced by forest construction function ========== + +Decision forest training report includes following information: +* training set errors +* out-of-bag estimates of errors +* variable importance ratings + +Following fields are used to store information: +* training set errors are stored in rep.relclserror, rep.avgce, rep.rmserror, + rep.avgerror and rep.avgrelerror +* out-of-bag estimates of errors are stored in rep.oobrelclserror, rep.oobavgce, + rep.oobrmserror, rep.oobavgerror and rep.oobavgrelerror + +Variable importance reports, if requested by dfbuildersetimportancegini(), +dfbuildersetimportancetrngini() or dfbuildersetimportancepermutation() +call, are stored in: +* rep.varimportances field stores importance ratings +* rep.topvars stores variable indexes ordered from the most important to + less important ones + +You can find more information about report fields in: +* comments on dfreport structure +* comments on dfbuildersetimportancegini function +* comments on dfbuildersetimportancetrngini function +* comments on dfbuildersetimportancepermutation function -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 21.05.2018 by Bochkanov Sergey *************************************************************************/ -void clusterizergetdistances(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nfeatures, - ae_int_t disttype, - /* Real */ ae_matrix* d, - ae_state *_state) +void dfbuilderbuildrandomforest(const decisionforestbuilder &s, const ae_int_t ntrees, decisionforest &df, dfreport &rep, const xparams _xparams) { - ae_frame _frame_block; - apbuffers buf; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + 
_ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuilderbuildrandomforest(const_cast(s.c_ptr()), ntrees, const_cast(df.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} - ae_frame_make(_state, &_frame_block); - ae_matrix_clear(d); - _apbuffers_init(&buf, _state); +/************************************************************************* +This function performs binary compression of the decision forest. - ae_assert(nfeatures>=1, "ClusterizerGetDistances: NFeatures<1", _state); - ae_assert(npoints>=0, "ClusterizerGetDistances: NPoints<1", _state); - ae_assert((((((((disttype==0||disttype==1)||disttype==2)||disttype==10)||disttype==11)||disttype==12)||disttype==13)||disttype==20)||disttype==21, "ClusterizerGetDistances: incorrect DistType", _state); - ae_assert(xy->rows>=npoints, "ClusterizerGetDistances: Rows(XY)cols>=nfeatures, "ClusterizerGetDistances: Cols(XY)(df.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } - /************************************************************************* -Buffered version of ClusterizerGetDistances() which reuses previously -allocated space. +Inference using decision forest + +IMPORTANT: this function is thread-unsafe and may modify internal + structures of the model! You can not use same model object for + parallel evaluation from several threads. + + Use dftsprocess() with independent thread-local buffers if + you need thread-safe evaluation. + +INPUT PARAMETERS: + DF - decision forest model + X - input vector, array[NVars] + Y - possibly preallocated buffer, reallocated if too small + +OUTPUT PARAMETERS: + Y - result. Regression estimate when solving regression task, + vector of posterior probabilities for classification task. + +See also DFProcessI. 
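After training, single-threaded inference goes through the wrappers documented above; dfbinarycompression() is optional and shown only because the build notes above mention it. The input values and the helper function are illustrative assumptions.

#include "dataanalysis.h"   // ALGLIB header; the install path may differ (assumption)

void inference_example(alglib::decisionforest &df)
{
    // Optional: convert the forest to the compact 16-bit representation.
    alglib::dfbinarycompression(df);

    // y receives class posteriors (classification) or the regression estimate.
    alglib::real_1d_array x = "[0.0, 1.0]";
    alglib::real_1d_array y;
    alglib::dfprocess(df, x, y);
}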
+ -- ALGLIB -- - Copyright 29.05.2015 by Bochkanov Sergey + Copyright 16.02.2009 by Bochkanov Sergey *************************************************************************/ -void clusterizergetdistancesbuf(apbuffers* buf, - /* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nfeatures, - ae_int_t disttype, - /* Real */ ae_matrix* d, - ae_state *_state) +void dfprocess(const decisionforest &df, const real_1d_array &x, real_1d_array &y, const xparams _xparams) { - ae_int_t i; - ae_int_t j; - double v; - double vv; - double vr; - - - ae_assert(nfeatures>=1, "ClusterizerGetDistancesBuf: NFeatures<1", _state); - ae_assert(npoints>=0, "ClusterizerGetDistancesBuf: NPoints<1", _state); - ae_assert((((((((disttype==0||disttype==1)||disttype==2)||disttype==10)||disttype==11)||disttype==12)||disttype==13)||disttype==20)||disttype==21, "ClusterizerGetDistancesBuf: incorrect DistType", _state); - ae_assert(xy->rows>=npoints, "ClusterizerGetDistancesBuf: Rows(XY)cols>=nfeatures, "ClusterizerGetDistancesBuf: Cols(XY)(df.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +'interactive' variant of DFProcess for languages like Python which support +constructs like "Y = DFProcessI(DF,X)" and interactive mode of interpreter + +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. + +IMPORTANT: this function is thread-unsafe and may modify internal + structures of the model! You can not use same model object for + parallel evaluation from several threads. + + Use dftsprocess() with independent thread-local buffers if + you need thread-safe evaluation. + + -- ALGLIB -- + Copyright 28.02.2010 by Bochkanov Sergey +*************************************************************************/ +void dfprocessi(const decisionforest &df, const real_1d_array &x, real_1d_array &y, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - rmatrixsetlengthatleast(d, 1, 1, _state); - d->ptr.pp_double[0][0] = (double)(0); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * Build distance matrix D. - */ - if( disttype==0||disttype==1 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfprocessi(const_cast(df.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function returns first component of the inferred vector (i.e. one +with index #0). + +It is a convenience wrapper for dfprocess() intended for either: +* 1-dimensional regression problems +* 2-class classification problems + +In the former case this function returns inference result as scalar, which +is definitely more convenient that wrapping it as vector. In the latter +case it returns probability of object belonging to class #0. + +If you call it for anything different from two cases above, it will work +as defined, i.e. 
return y[0], although it is of less use in such cases. + +IMPORTANT: this function is thread-unsafe and modifies internal structures + of the model! You can not use same model object for parallel + evaluation from several threads. + + Use dftsprocess() with independent thread-local buffers, if + you need thread-safe evaluation. + +INPUT PARAMETERS: + Model - DF model + X - input vector, array[0..NVars-1]. + +RESULT: + Y[0] + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +double dfprocess0(const decisionforest &model, const real_1d_array &x, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Chebyshev or city-block distances: - * * recursively calculate upper triangle (with main diagonal) - * * copy it to the bottom part of the matrix - */ - rmatrixsetlengthatleast(d, npoints, npoints, _state); - clustering_evaluatedistancematrixrec(xy, nfeatures, disttype, d, 0, npoints, 0, npoints, _state); - rmatrixenforcesymmetricity(d, npoints, ae_true, _state); - return; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - if( disttype==2 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::dfprocess0(const_cast(model.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +This function returns most probable class number for an input X. It is +same as calling dfprocess(model,x,y), then determining i=argmax(y[i]) and +returning i. + +A class number in [0,NOut) range in returned for classification problems, +-1 is returned when this function is called for regression problems. + +IMPORTANT: this function is thread-unsafe and modifies internal structures + of the model! You can not use same model object for parallel + evaluation from several threads. + + Use dftsprocess() with independent thread-local buffers, if + you need thread-safe evaluation. + +INPUT PARAMETERS: + Model - decision forest model + X - input vector, array[0..NVars-1]. 
+ +RESULT: + class number, -1 for regression tasks + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +ae_int_t dfclassify(const decisionforest &model, const real_1d_array &x, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Euclidean distance - * - * NOTE: parallelization is done within RMatrixSYRK - */ - rmatrixsetlengthatleast(d, npoints, npoints, _state); - rmatrixsetlengthatleast(&buf->rm0, npoints, nfeatures, _state); - rvectorsetlengthatleast(&buf->ra1, nfeatures, _state); - rvectorsetlengthatleast(&buf->ra0, npoints, _state); - for(j=0; j<=nfeatures-1; j++) - { - buf->ra1.ptr.p_double[j] = 0.0; - } - v = (double)1/(double)npoints; - for(i=0; i<=npoints-1; i++) - { - ae_v_addd(&buf->ra1.ptr.p_double[0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,nfeatures-1), v); - } - for(i=0; i<=npoints-1; i++) - { - ae_v_move(&buf->rm0.ptr.pp_double[i][0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,nfeatures-1)); - ae_v_sub(&buf->rm0.ptr.pp_double[i][0], 1, &buf->ra1.ptr.p_double[0], 1, ae_v_len(0,nfeatures-1)); - } - rmatrixsyrk(npoints, nfeatures, 1.0, &buf->rm0, 0, 0, 0, 0.0, d, 0, 0, ae_true, _state); - for(i=0; i<=npoints-1; i++) - { - buf->ra0.ptr.p_double[i] = d->ptr.pp_double[i][i]; - } - for(i=0; i<=npoints-1; i++) - { - d->ptr.pp_double[i][i] = 0.0; - for(j=i+1; j<=npoints-1; j++) - { - v = ae_sqrt(ae_maxreal(buf->ra0.ptr.p_double[i]+buf->ra0.ptr.p_double[j]-2*d->ptr.pp_double[i][j], 0.0, _state), _state); - d->ptr.pp_double[i][j] = v; - } - } - rmatrixenforcesymmetricity(d, npoints, ae_true, _state); - return; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - if( disttype==10||disttype==11 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::dfclassify(const_cast(model.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +Inference using decision forest + +Thread-safe procesing using external buffer for temporaries. + +This function is thread-safe (i.e . you can use same DF model from +multiple threads) as long as you use different buffer objects for different +threads. + +INPUT PARAMETERS: + DF - decision forest model + Buf - buffer object, must be allocated specifically for this + model with dfcreatebuffer(). + X - input vector, array[NVars] + Y - possibly preallocated buffer, reallocated if too small + +OUTPUT PARAMETERS: + Y - result. Regression estimate when solving regression task, + vector of posterior probabilities for classification task. + +See also DFProcessI. 
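For the two special cases singled out above (1-dimensional regression and 2-class classification), the scalar wrappers avoid dealing with the output vector. A short sketch; the input vector is assumed to match the model's NVars.

#include "dataanalysis.h"   // ALGLIB header; the install path may differ (assumption)

void scalar_inference_example(const alglib::decisionforest &df, const alglib::real_1d_array &x)
{
    // Probability of class #0 for 2-class models, or the scalar regression output.
    double p0 = alglib::dfprocess0(df, x);

    // Most probable class index for classifiers, -1 for regression models.
    alglib::ae_int_t cls = alglib::dfclassify(df, x);
    (void)p0;
    (void)cls;
}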
+ + + -- ALGLIB -- + Copyright 16.02.2009 by Bochkanov Sergey +*************************************************************************/ +void dftsprocess(const decisionforest &df, const decisionforestbuffer &buf, const real_1d_array &x, real_1d_array &y, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Absolute/nonabsolute Pearson correlation distance - * - * NOTE: parallelization is done within PearsonCorrM, which calls RMatrixSYRK internally - */ - rmatrixsetlengthatleast(d, npoints, npoints, _state); - rvectorsetlengthatleast(&buf->ra0, npoints, _state); - rmatrixsetlengthatleast(&buf->rm0, npoints, nfeatures, _state); - for(i=0; i<=npoints-1; i++) - { - v = 0.0; - for(j=0; j<=nfeatures-1; j++) - { - v = v+xy->ptr.pp_double[i][j]; - } - v = v/nfeatures; - for(j=0; j<=nfeatures-1; j++) - { - buf->rm0.ptr.pp_double[i][j] = xy->ptr.pp_double[i][j]-v; - } - } - rmatrixsyrk(npoints, nfeatures, 1.0, &buf->rm0, 0, 0, 0, 0.0, d, 0, 0, ae_true, _state); - for(i=0; i<=npoints-1; i++) - { - buf->ra0.ptr.p_double[i] = d->ptr.pp_double[i][i]; - } - for(i=0; i<=npoints-1; i++) - { - d->ptr.pp_double[i][i] = 0.0; - for(j=i+1; j<=npoints-1; j++) - { - v = d->ptr.pp_double[i][j]/ae_sqrt(buf->ra0.ptr.p_double[i]*buf->ra0.ptr.p_double[j], _state); - if( disttype==10 ) - { - v = 1-v; - } - else - { - v = 1-ae_fabs(v, _state); - } - v = ae_maxreal(v, 0.0, _state); - d->ptr.pp_double[i][j] = v; - } - } - rmatrixenforcesymmetricity(d, npoints, ae_true, _state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - if( disttype==12||disttype==13 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dftsprocess(const_cast(df.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +Relative classification error on the test set + +INPUT PARAMETERS: + DF - decision forest model + XY - test set + NPoints - test set size + +RESULT: + percent of incorrectly classified cases. + Zero if model solves regression task. 
+ + -- ALGLIB -- + Copyright 16.02.2009 by Bochkanov Sergey +*************************************************************************/ +double dfrelclserror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Absolute/nonabsolute uncentered Pearson correlation distance - * - * NOTE: parallelization is done within RMatrixSYRK - */ - rmatrixsetlengthatleast(d, npoints, npoints, _state); - rvectorsetlengthatleast(&buf->ra0, npoints, _state); - rmatrixsyrk(npoints, nfeatures, 1.0, xy, 0, 0, 0, 0.0, d, 0, 0, ae_true, _state); - for(i=0; i<=npoints-1; i++) - { - buf->ra0.ptr.p_double[i] = d->ptr.pp_double[i][i]; - } - for(i=0; i<=npoints-1; i++) - { - d->ptr.pp_double[i][i] = 0.0; - for(j=i+1; j<=npoints-1; j++) - { - v = d->ptr.pp_double[i][j]/ae_sqrt(buf->ra0.ptr.p_double[i]*buf->ra0.ptr.p_double[j], _state); - if( disttype==13 ) - { - v = ae_fabs(v, _state); - } - v = ae_minreal(v, 1.0, _state); - d->ptr.pp_double[i][j] = 1-v; - } - } - rmatrixenforcesymmetricity(d, npoints, ae_true, _state); - return; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - if( disttype==20||disttype==21 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::dfrelclserror(const_cast(df.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +Average cross-entropy (in bits per element) on the test set + +INPUT PARAMETERS: + DF - decision forest model + XY - test set + NPoints - test set size + +RESULT: + CrossEntropy/(NPoints*LN(2)). + Zero if model solves regression task. 
+ + -- ALGLIB -- + Copyright 16.02.2009 by Bochkanov Sergey +*************************************************************************/ +double dfavgce(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Spearman rank correlation - * - * NOTE: parallelization of correlation matrix is done within - * PearsonCorrM, which calls RMatrixSYRK internally - */ - rmatrixsetlengthatleast(d, npoints, npoints, _state); - rvectorsetlengthatleast(&buf->ra0, npoints, _state); - rmatrixsetlengthatleast(&buf->rm0, npoints, nfeatures, _state); - rmatrixcopy(npoints, nfeatures, xy, 0, 0, &buf->rm0, 0, 0, _state); - rankdatacentered(&buf->rm0, npoints, nfeatures, _state); - rmatrixsyrk(npoints, nfeatures, 1.0, &buf->rm0, 0, 0, 0, 0.0, d, 0, 0, ae_true, _state); - for(i=0; i<=npoints-1; i++) - { - if( ae_fp_greater(d->ptr.pp_double[i][i],(double)(0)) ) - { - buf->ra0.ptr.p_double[i] = 1/ae_sqrt(d->ptr.pp_double[i][i], _state); - } - else - { - buf->ra0.ptr.p_double[i] = 0.0; - } - } - for(i=0; i<=npoints-1; i++) - { - v = buf->ra0.ptr.p_double[i]; - d->ptr.pp_double[i][i] = 0.0; - for(j=i+1; j<=npoints-1; j++) - { - vv = d->ptr.pp_double[i][j]*v*buf->ra0.ptr.p_double[j]; - if( disttype==20 ) - { - vr = 1-vv; - } - else - { - vr = 1-ae_fabs(vv, _state); - } - if( ae_fp_less(vr,(double)(0)) ) - { - vr = 0.0; - } - d->ptr.pp_double[i][j] = vr; - } - } - rmatrixenforcesymmetricity(d, npoints, ae_true, _state); - return; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - ae_assert(ae_false, "Assertion failed", _state); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::dfavgce(const_cast(df.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } +/************************************************************************* +RMS error on the test set + +INPUT PARAMETERS: + DF - decision forest model + XY - test set + NPoints - test set size + +RESULT: + root mean square error. + Its meaning for regression task is obvious. As for + classification task, RMS error means error when estimating posterior + probabilities. 
+ + -- ALGLIB -- + Copyright 16.02.2009 by Bochkanov Sergey +*************************************************************************/ +double dfrmserror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::dfrmserror(const_cast(df.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} /************************************************************************* -This function takes as input clusterization report Rep, desired clusters -count K, and builds top K clusters from hierarchical clusterization tree. -It returns assignment of points to clusters (array of cluster indexes). +Average error on the test set INPUT PARAMETERS: - Rep - report from ClusterizerRunAHC() performed on XY - K - desired number of clusters, 1<=K<=NPoints. - K can be zero only when NPoints=0. + DF - decision forest model + XY - test set + NPoints - test set size -OUTPUT PARAMETERS: - CIdx - array[NPoints], I-th element contains cluster index (from - 0 to K-1) for I-th point of the dataset. - CZ - array[K]. This array allows to convert cluster indexes - returned by this function to indexes used by Rep.Z. J-th - cluster returned by this function corresponds to CZ[J]-th - cluster stored in Rep.Z/PZ/PM. - It is guaranteed that CZ[I](df.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +Average relative error on the test set + +INPUT PARAMETERS: + DF - decision forest model + XY - test set + NPoints - test set size + +RESULT: + Its meaning for regression task is obvious. As for + classification task, it means average relative error when estimating + posterior probability of belonging to the correct class. 
-- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 16.02.2009 by Bochkanov Sergey *************************************************************************/ -void clusterizergetkclusters(ahcreport* rep, - ae_int_t k, - /* Integer */ ae_vector* cidx, - /* Integer */ ae_vector* cz, - ae_state *_state) +double dfavgrelerror(const decisionforest &df, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { - ae_frame _frame_block; - ae_int_t i; - ae_int_t mergeidx; - ae_int_t i0; - ae_int_t i1; - ae_int_t t; - ae_vector presentclusters; - ae_vector clusterindexes; - ae_vector clustersizes; - ae_vector tmpidx; - ae_int_t npoints; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::dfavgrelerror(const_cast(df.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} - ae_frame_make(_state, &_frame_block); - ae_vector_clear(cidx); - ae_vector_clear(cz); - ae_vector_init(&presentclusters, 0, DT_BOOL, _state); - ae_vector_init(&clusterindexes, 0, DT_INT, _state); - ae_vector_init(&clustersizes, 0, DT_INT, _state); - ae_vector_init(&tmpidx, 0, DT_INT, _state); +/************************************************************************* +This subroutine builds random decision forest. - npoints = rep->npoints; - ae_assert(npoints>=0, "ClusterizerGetKClusters: internal error in Rep integrity", _state); - ae_assert(k>=0, "ClusterizerGetKClusters: K<=0", _state); - ae_assert(k<=npoints, "ClusterizerGetKClusters: K>NPoints", _state); - ae_assert(k>0||npoints==0, "ClusterizerGetKClusters: K<=0", _state); - ae_assert(npoints==rep->npoints, "ClusterizerGetKClusters: NPoints<>Rep.NPoints", _state); - - /* - * Quick exit - */ - if( npoints==0 ) +--------- DEPRECATED VERSION! 
USE DECISION FOREST BUILDER OBJECT --------- + + -- ALGLIB -- + Copyright 19.02.2009 by Bochkanov Sergey +*************************************************************************/ +void dfbuildrandomdecisionforest(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees, const double r, ae_int_t &info, decisionforest &df, dfreport &rep, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - if( npoints==1 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildrandomdecisionforest(const_cast(xy.c_ptr()), npoints, nvars, nclasses, ntrees, r, &info, const_cast(df.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This subroutine builds random decision forest. + +--------- DEPRECATED VERSION! USE DECISION FOREST BUILDER OBJECT --------- + + -- ALGLIB -- + Copyright 19.02.2009 by Bochkanov Sergey +*************************************************************************/ +void dfbuildrandomdecisionforestx1(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const ae_int_t ntrees, const ae_int_t nrndvars, const double r, ae_int_t &info, decisionforest &df, dfreport &rep, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - ae_vector_set_length(cz, 1, _state); - ae_vector_set_length(cidx, 1, _state); - cz->ptr.p_int[0] = 0; - cidx->ptr.p_int[0] = 0; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::dfbuildrandomdecisionforestx1(const_cast(xy.c_ptr()), npoints, nvars, nclasses, ntrees, nrndvars, r, &info, const_cast(df.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} +#endif + +#if defined(AE_COMPILE_KNN) || !defined(AE_PARTIAL_BUILD) +/************************************************************************* +Buffer object which is used to perform various requests (usually model +inference) in the multithreaded mode (multiple threads working with same +KNN object). + +This object should be created with KNNCreateBuffer(). 
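A sketch of the intended per-thread pattern (illustrative only; knntsprocess() is assumed here as the buffer-based inference call, by analogy with dftsprocess() above):

    #include "dataanalysis.h"
    using namespace alglib;

    void knn_worker(const knnmodel &model, const real_1d_array &x)
    {
        knnbuffer buf;                   // one buffer per worker thread
        knncreatebuffer(model, buf);     // buffer is tied to this specific model
        real_1d_array y;
        knntsprocess(model, buf, x, y);  // assumed thread-safe request using the local buffer
    }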
+*************************************************************************/ +_knnbuffer_owner::_knnbuffer_owner() +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * Replay merges, from top to bottom, - * keep track of clusters being present at the moment - */ - ae_vector_set_length(&presentclusters, 2*npoints-1, _state); - ae_vector_set_length(&tmpidx, npoints, _state); - for(i=0; i<=2*npoints-3; i++) - { - presentclusters.ptr.p_bool[i] = ae_false; - } - presentclusters.ptr.p_bool[2*npoints-2] = ae_true; - for(i=0; i<=npoints-1; i++) - { - tmpidx.ptr.p_int[i] = 2*npoints-2; - } - for(mergeidx=npoints-2; mergeidx>=npoints-k; mergeidx--) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - - /* - * Update information about clusters being present at the moment - */ - presentclusters.ptr.p_bool[npoints+mergeidx] = ae_false; - presentclusters.ptr.p_bool[rep->z.ptr.pp_int[mergeidx][0]] = ae_true; - presentclusters.ptr.p_bool[rep->z.ptr.pp_int[mergeidx][1]] = ae_true; - - /* - * Update TmpIdx according to the current state of the dataset - * - * NOTE: TmpIdx contains cluster indexes from [0..2*NPoints-2]; - * we will convert them to [0..K-1] later. - */ - i0 = rep->pm.ptr.pp_int[mergeidx][0]; - i1 = rep->pm.ptr.pp_int[mergeidx][1]; - t = rep->z.ptr.pp_int[mergeidx][0]; - for(i=i0; i<=i1; i++) - { - tmpidx.ptr.p_int[i] = t; - } - i0 = rep->pm.ptr.pp_int[mergeidx][2]; - i1 = rep->pm.ptr.pp_int[mergeidx][3]; - t = rep->z.ptr.pp_int[mergeidx][1]; - for(i=i0; i<=i1; i++) + if( p_struct!=NULL ) { - tmpidx.ptr.p_int[i] = t; + alglib_impl::_knnbuffer_destroy(p_struct); + alglib_impl::ae_free(p_struct); } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::knnbuffer*)alglib_impl::ae_malloc(sizeof(alglib_impl::knnbuffer), &_state); + memset(p_struct, 0, sizeof(alglib_impl::knnbuffer)); + alglib_impl::_knnbuffer_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); +} + +_knnbuffer_owner::_knnbuffer_owner(const _knnbuffer_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * Fill CZ - array which allows us to convert cluster indexes - * from one system to another. 
- */ - ae_vector_set_length(cz, k, _state); - ae_vector_set_length(&clusterindexes, 2*npoints-1, _state); - t = 0; - for(i=0; i<=2*npoints-2; i++) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - if( presentclusters.ptr.p_bool[i] ) + if( p_struct!=NULL ) { - cz->ptr.p_int[t] = i; - clusterindexes.ptr.p_int[i] = t; - t = t+1; + alglib_impl::_knnbuffer_destroy(p_struct); + alglib_impl::ae_free(p_struct); } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } - ae_assert(t==k, "ClusterizerGetKClusters: internal error", _state); + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: knnbuffer copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::knnbuffer*)alglib_impl::ae_malloc(sizeof(alglib_impl::knnbuffer), &_state); + memset(p_struct, 0, sizeof(alglib_impl::knnbuffer)); + alglib_impl::_knnbuffer_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} + +_knnbuffer_owner& _knnbuffer_owner::operator=(const _knnbuffer_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * Convert indexes stored in CIdx - */ - ae_vector_set_length(cidx, npoints, _state); - for(i=0; i<=npoints-1; i++) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - cidx->ptr.p_int[i] = clusterindexes.ptr.p_int[tmpidx.ptr.p_int[rep->p.ptr.p_int[i]]]; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - ae_frame_leave(_state); + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: knnbuffer assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: knnbuffer assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_knnbuffer_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::knnbuffer)); + alglib_impl::_knnbuffer_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; } +_knnbuffer_owner::~_knnbuffer_owner() +{ + if( p_struct!=NULL ) + { + alglib_impl::_knnbuffer_destroy(p_struct); + ae_free(p_struct); + } +} -/************************************************************************* -This function accepts AHC report Rep, desired minimum intercluster -distance and returns top clusters from hierarchical clusterization tree -which are separated by distance R or HIGHER. +alglib_impl::knnbuffer* _knnbuffer_owner::c_ptr() +{ + return p_struct; +} -It returns assignment of points to clusters (array of cluster indexes). +alglib_impl::knnbuffer* _knnbuffer_owner::c_ptr() const +{ + return const_cast(p_struct); +} +knnbuffer::knnbuffer() : _knnbuffer_owner() +{ +} -There is one more function with similar name - ClusterizerSeparatedByCorr, -which returns clusters with intercluster correlation equal to R or LOWER -(note: higher for distance, lower for correlation). 
+knnbuffer::knnbuffer(const knnbuffer &rhs):_knnbuffer_owner(rhs) +{ +} -INPUT PARAMETERS: - Rep - report from ClusterizerRunAHC() performed on XY - R - desired minimum intercluster distance, R>=0 +knnbuffer& knnbuffer::operator=(const knnbuffer &rhs) +{ + if( this==&rhs ) + return *this; + _knnbuffer_owner::operator=(rhs); + return *this; +} -OUTPUT PARAMETERS: - K - number of clusters, 1<=K<=NPoints - CIdx - array[NPoints], I-th element contains cluster index (from - 0 to K-1) for I-th point of the dataset. - CZ - array[K]. This array allows to convert cluster indexes - returned by this function to indexes used by Rep.Z. J-th - cluster returned by this function corresponds to CZ[J]-th - cluster stored in Rep.Z/PZ/PM. - It is guaranteed that CZ[I](rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} - ae_assert(ae_isfinite(r, _state)&&ae_fp_greater_eq(r,(double)(0)), "ClusterizerSeparatedByDist: R is infinite or less than 0", _state); - *k = 1; - while(*knpoints&&ae_fp_greater_eq(rep->mergedist.ptr.p_double[rep->npoints-1-(*k)],r)) +_knnbuilder_owner& _knnbuilder_owner::operator=(const _knnbuilder_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - *k = *k+1; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - clusterizergetkclusters(rep, *k, cidx, cz, _state); + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: knnbuilder assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: knnbuilder assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_knnbuilder_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::knnbuilder)); + alglib_impl::_knnbuilder_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; } +_knnbuilder_owner::~_knnbuilder_owner() +{ + if( p_struct!=NULL ) + { + alglib_impl::_knnbuilder_destroy(p_struct); + ae_free(p_struct); + } +} -/************************************************************************* -This function accepts AHC report Rep, desired maximum intercluster -correlation and returns top clusters from hierarchical clusterization tree -which are separated by correlation R or LOWER. +alglib_impl::knnbuilder* _knnbuilder_owner::c_ptr() +{ + return p_struct; +} -It returns assignment of points to clusters (array of cluster indexes). +alglib_impl::knnbuilder* _knnbuilder_owner::c_ptr() const +{ + return const_cast(p_struct); +} +knnbuilder::knnbuilder() : _knnbuilder_owner() +{ +} -There is one more function with similar name - ClusterizerSeparatedByDist, -which returns clusters with intercluster distance equal to R or HIGHER -(note: higher for distance, lower for correlation). +knnbuilder::knnbuilder(const knnbuilder &rhs):_knnbuilder_owner(rhs) +{ +} -INPUT PARAMETERS: - Rep - report from ClusterizerRunAHC() performed on XY - R - desired maximum intercluster correlation, -1<=R<=+1 +knnbuilder& knnbuilder::operator=(const knnbuilder &rhs) +{ + if( this==&rhs ) + return *this; + _knnbuilder_owner::operator=(rhs); + return *this; +} -OUTPUT PARAMETERS: - K - number of clusters, 1<=K<=NPoints - CIdx - array[NPoints], I-th element contains cluster index (from - 0 to K-1) for I-th point of the dataset. 
- CZ - array[K]. This array allows to convert cluster indexes - returned by this function to indexes used by Rep.Z. J-th - cluster returned by this function corresponds to CZ[J]-th - cluster stored in Rep.Z/PZ/PM. - It is guaranteed that CZ[I]npoints&&ae_fp_greater_eq(rep->mergedist.ptr.p_double[rep->npoints-1-(*k)],1-r)) + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - *k = *k+1; + if( p_struct!=NULL ) + { + alglib_impl::_knnmodel_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } - clusterizergetkclusters(rep, *k, cidx, cz, _state); + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + p_struct = (alglib_impl::knnmodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::knnmodel), &_state); + memset(p_struct, 0, sizeof(alglib_impl::knnmodel)); + alglib_impl::_knnmodel_init(p_struct, &_state, ae_false); + ae_state_clear(&_state); } +_knnmodel_owner::_knnmodel_owner(const _knnmodel_owner &rhs) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { + if( p_struct!=NULL ) + { + alglib_impl::_knnmodel_destroy(p_struct); + alglib_impl::ae_free(p_struct); + } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: knnmodel copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::knnmodel*)alglib_impl::ae_malloc(sizeof(alglib_impl::knnmodel), &_state); + memset(p_struct, 0, sizeof(alglib_impl::knnmodel)); + alglib_impl::_knnmodel_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} -/************************************************************************* -K-means++ initialization - -INPUT PARAMETERS: - Buf - special reusable structure which stores previously allocated - memory, intended to avoid memory fragmentation when solving - multiple subsequent problems. Must be initialized prior to - usage. 
+_knnmodel_owner& _knnmodel_owner::operator=(const _knnmodel_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; + + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif + } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: knnmodel assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: knnmodel assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_knnmodel_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::knnmodel)); + alglib_impl::_knnmodel_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} -OUTPUT PARAMETERS: - Buf - initialized structure +_knnmodel_owner::~_knnmodel_owner() +{ + if( p_struct!=NULL ) + { + alglib_impl::_knnmodel_destroy(p_struct); + ae_free(p_struct); + } +} - -- ALGLIB -- - Copyright 24.07.2015 by Bochkanov Sergey -*************************************************************************/ -void kmeansinitbuf(kmeansbuffers* buf, ae_state *_state) +alglib_impl::knnmodel* _knnmodel_owner::c_ptr() { - ae_frame _frame_block; - apbuffers updateseed; + return p_struct; +} - ae_frame_make(_state, &_frame_block); - _apbuffers_init(&updateseed, _state); +alglib_impl::knnmodel* _knnmodel_owner::c_ptr() const +{ + return const_cast(p_struct); +} +knnmodel::knnmodel() : _knnmodel_owner() +{ +} - ae_shared_pool_set_seed(&buf->updatepool, &updateseed, sizeof(updateseed), _apbuffers_init, _apbuffers_init_copy, _apbuffers_destroy, _state); - ae_frame_leave(_state); +knnmodel::knnmodel(const knnmodel &rhs):_knnmodel_owner(rhs) +{ +} + +knnmodel& knnmodel::operator=(const knnmodel &rhs) +{ + if( this==&rhs ) + return *this; + _knnmodel_owner::operator=(rhs); + return *this; +} + +knnmodel::~knnmodel() +{ } /************************************************************************* -K-means++ clusterization +KNN training report. -INPUT PARAMETERS: - XY - dataset, array [0..NPoints-1,0..NVars-1]. - NPoints - dataset size, NPoints>=K - NVars - number of variables, NVars>=1 - K - desired number of clusters, K>=1 - InitAlgo - initialization algorithm: - * 0 - automatic selection of best algorithm - * 1 - random selection of centers - * 2 - k-means++ - * 3 - fast-greedy init - *-1 - first K rows of dataset are used - (special debug algorithm) - MaxIts - iterations limit or zero for no limit - Restarts - number of restarts, Restarts>=1 - KMeansDbgNoIts- debug flag; if set, Lloyd's iteration is not performed, - only initialization phase. - Buf - special reusable structure which stores previously allocated - memory, intended to avoid memory fragmentation when solving - multiple subsequent problems: - * MUST BE INITIALIZED WITH KMeansInitBuffers() CALL BEFORE - FIRST PASS TO THIS FUNCTION! 
- * subsequent passes must be made without re-initialization +Following fields store training set errors: +* relclserror - fraction of misclassified cases, [0,1] +* avgce - average cross-entropy in bits per symbol +* rmserror - root-mean-square error +* avgerror - average error +* avgrelerror - average relative error -OUTPUT PARAMETERS: - Info - return code: - * -3, if task is degenerate (number of distinct points is - less than K) - * -1, if incorrect NPoints/NFeatures/K/Restarts was passed - * 1, if subroutine finished successfully - IterationsCount- actual number of iterations performed by clusterizer - CCol - array[0..NVars-1,0..K-1].matrix whose columns store - cluster's centers - NeedCCol - True in case caller requires to store result in CCol - CRow - array[0..K-1,0..NVars-1], same as CCol, but centers are - stored in rows - NeedCRow - True in case caller requires to store result in CCol - XYC - array[NPoints], which contains cluster indexes - Energy - merit function of clusterization +For classification problems: +* RMS, AVG and AVGREL errors are calculated for posterior probabilities - -- ALGLIB -- - Copyright 21.03.2009 by Bochkanov Sergey +For regression problems: +* RELCLS and AVGCE errors are zero *************************************************************************/ -void kmeansgenerateinternal(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t k, - ae_int_t initalgo, - ae_int_t maxits, - ae_int_t restarts, - ae_bool kmeansdbgnoits, - ae_int_t* info, - ae_int_t* iterationscount, - /* Real */ ae_matrix* ccol, - ae_bool needccol, - /* Real */ ae_matrix* crow, - ae_bool needcrow, - /* Integer */ ae_vector* xyc, - double* energy, - kmeansbuffers* buf, - ae_state *_state) +_knnreport_owner::_knnreport_owner() { - ae_frame _frame_block; - ae_int_t i; - ae_int_t j; - ae_int_t i1; - double e; - double eprev; - double v; - double vv; - ae_bool waschanges; - ae_bool zerosizeclusters; - ae_int_t pass; - ae_int_t itcnt; - hqrndstate rs; - - ae_frame_make(_state, &_frame_block); - *info = 0; - *iterationscount = 0; - ae_matrix_clear(ccol); - ae_matrix_clear(crow); - ae_vector_clear(xyc); - *energy = 0; - _hqrndstate_init(&rs, _state); - + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * Test parameters - */ - if( ((npointsct, k, nvars, _state); - rmatrixsetlengthatleast(&buf->ctbest, k, nvars, _state); - ivectorsetlengthatleast(&buf->xycprev, npoints, _state); - ivectorsetlengthatleast(&buf->xycbest, npoints, _state); - rvectorsetlengthatleast(&buf->d2, npoints, _state); - ivectorsetlengthatleast(&buf->csizes, k, _state); - *energy = ae_maxrealnumber; - hqrndrandomize(&rs, _state); - for(pass=1; pass<=restarts; pass++) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - - /* - * Select initial centers. - * - * Note that for performance reasons centers are stored in ROWS of CT, not - * in columns. We'll transpose CT in the end and store it in the C. - * - * Also note that SelectInitialCenters() may return degenerate set of centers - * (some of them have no corresponding points in dataset, some are non-distinct). - * Algorithm below is robust enough to deal with such set. 
- */ - clustering_selectinitialcenters(xy, npoints, nvars, initalgo, k, &buf->ct, &buf->initbuf, &buf->updatepool, _state); - - /* - * Lloyd's iteration - */ - if( !kmeansdbgnoits ) - { - - /* - * Perform iteration as usual, in normal mode - */ - for(i=0; i<=npoints-1; i++) - { - xyc->ptr.p_int[i] = -1; - } - eprev = ae_maxrealnumber; - e = ae_maxrealnumber; - itcnt = 0; - while(maxits==0||itcntxycprev.ptr.p_int[i] = xyc->ptr.p_int[i]; - } - kmeansupdatedistances(xy, 0, npoints, nvars, &buf->ct, 0, k, xyc, &buf->d2, &buf->updatepool, _state); - waschanges = ae_false; - for(i=0; i<=npoints-1; i++) - { - waschanges = waschanges||xyc->ptr.p_int[i]!=buf->xycprev.ptr.p_int[i]; - } - - /* - * Update centers - */ - for(j=0; j<=k-1; j++) - { - buf->csizes.ptr.p_int[j] = 0; - } - for(i=0; i<=k-1; i++) - { - for(j=0; j<=nvars-1; j++) - { - buf->ct.ptr.pp_double[i][j] = (double)(0); - } - } - for(i=0; i<=npoints-1; i++) - { - buf->csizes.ptr.p_int[xyc->ptr.p_int[i]] = buf->csizes.ptr.p_int[xyc->ptr.p_int[i]]+1; - ae_v_add(&buf->ct.ptr.pp_double[xyc->ptr.p_int[i]][0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,nvars-1)); - } - zerosizeclusters = ae_false; - for(j=0; j<=k-1; j++) - { - if( buf->csizes.ptr.p_int[j]!=0 ) - { - v = (double)1/(double)buf->csizes.ptr.p_int[j]; - ae_v_muld(&buf->ct.ptr.pp_double[j][0], 1, ae_v_len(0,nvars-1), v); - } - zerosizeclusters = zerosizeclusters||buf->csizes.ptr.p_int[j]==0; - } - if( zerosizeclusters ) - { - - /* - * Some clusters have zero size - rare, but possible. - * We'll choose new centers for such clusters using k-means++ rule - * and restart algorithm - */ - if( !clustering_fixcenters(xy, npoints, nvars, &buf->ct, k, &buf->initbuf, &buf->updatepool, _state) ) - { - *info = -3; - ae_frame_leave(_state); - return; - } - continue; - } - - /* - * Stop if one of two conditions is met: - * 1. nothing has changed during iteration - * 2. energy function increased after recalculation on new centers - */ - e = (double)(0); - for(i=0; i<=npoints-1; i++) - { - v = 0.0; - i1 = xyc->ptr.p_int[i]; - for(j=0; j<=nvars-1; j++) - { - vv = xy->ptr.pp_double[i][j]-buf->ct.ptr.pp_double[i1][j]; - v = v+vv*vv; - } - e = e+v; - } - if( !waschanges||ae_fp_greater_eq(e,eprev) ) - { - break; - } - - /* - * Update EPrev - */ - eprev = e; - } - } - else - { - - /* - * Debug mode: no Lloyd's iteration. - * We just calculate potential E. - */ - kmeansupdatedistances(xy, 0, npoints, nvars, &buf->ct, 0, k, xyc, &buf->d2, &buf->updatepool, _state); - e = (double)(0); - for(i=0; i<=npoints-1; i++) - { - e = e+buf->d2.ptr.p_double[i]; - } - } - - /* - * Compare E with best centers found so far - */ - if( ae_fp_less(e,*energy) ) + if( p_struct!=NULL ) { - - /* - * store partition. 
- */ - *energy = e; - copymatrix(&buf->ct, 0, k-1, 0, nvars-1, &buf->ctbest, 0, k-1, 0, nvars-1, _state); - for(i=0; i<=npoints-1; i++) - { - buf->xycbest.ptr.p_int[i] = xyc->ptr.p_int[i]; - } + alglib_impl::_knnreport_destroy(p_struct); + alglib_impl::ae_free(p_struct); } + p_struct = NULL; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return; +#endif } + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + p_struct = NULL; + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: knnreport copy constructor failure (source is not initialized)", &_state); + p_struct = (alglib_impl::knnreport*)alglib_impl::ae_malloc(sizeof(alglib_impl::knnreport), &_state); + memset(p_struct, 0, sizeof(alglib_impl::knnreport)); + alglib_impl::_knnreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); +} + +_knnreport_owner& _knnreport_owner::operator=(const _knnreport_owner &rhs) +{ + if( this==&rhs ) + return *this; + jmp_buf _break_jump; + alglib_impl::ae_state _state; - /* - * Copy and transpose - */ - if( needccol ) - { - ae_matrix_set_length(ccol, nvars, k, _state); - copyandtranspose(&buf->ctbest, 0, k-1, 0, nvars-1, ccol, 0, nvars-1, 0, k-1, _state); - } - if( needcrow ) + alglib_impl::ae_state_init(&_state); + if( setjmp(_break_jump) ) { - ae_matrix_set_length(crow, k, nvars, _state); - rmatrixcopy(k, nvars, &buf->ctbest, 0, 0, crow, 0, 0, _state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_state.error_msg); + return *this; +#endif } - for(i=0; i<=npoints-1; i++) + alglib_impl::ae_state_set_break_jump(&_state, &_break_jump); + alglib_impl::ae_assert(p_struct!=NULL, "ALGLIB: knnreport assignment constructor failure (destination is not initialized)", &_state); + alglib_impl::ae_assert(rhs.p_struct!=NULL, "ALGLIB: knnreport assignment constructor failure (source is not initialized)", &_state); + alglib_impl::_knnreport_destroy(p_struct); + memset(p_struct, 0, sizeof(alglib_impl::knnreport)); + alglib_impl::_knnreport_init_copy(p_struct, const_cast(rhs.p_struct), &_state, ae_false); + ae_state_clear(&_state); + return *this; +} + +_knnreport_owner::~_knnreport_owner() +{ + if( p_struct!=NULL ) { - xyc->ptr.p_int[i] = buf->xycbest.ptr.p_int[i]; + alglib_impl::_knnreport_destroy(p_struct); + ae_free(p_struct); } - ae_frame_leave(_state); } +alglib_impl::knnreport* _knnreport_owner::c_ptr() +{ + return p_struct; +} -/************************************************************************* -This procedure recalculates distances from points to centers and assigns -each point to closest center. +alglib_impl::knnreport* _knnreport_owner::c_ptr() const +{ + return const_cast(p_struct); +} +knnreport::knnreport() : _knnreport_owner() ,relclserror(p_struct->relclserror),avgce(p_struct->avgce),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror) +{ +} -INPUT PARAMETERS: - XY - dataset, array [0..NPoints-1,0..NVars-1]. - Idx0,Idx1 - define range of dataset [Idx0,Idx1) to process; - right boundary is not included. - NVars - number of variables, NVars>=1 - CT - matrix of centers, centers are stored in rows - CIdx0,CIdx1 - define range of centers [CIdx0,CIdx1) to process; - right boundary is not included. 
- XYC - preallocated output buffer, - XYDist2 - preallocated output buffer - Tmp - temporary buffer, automatically reallocated if needed - BufferPool - shared pool seeded with instance of APBuffers structure - (seed instance can be unitialized). It is recommended - to use this pool only with KMeansUpdateDistances() - function. +knnreport::knnreport(const knnreport &rhs):_knnreport_owner(rhs) ,relclserror(p_struct->relclserror),avgce(p_struct->avgce),rmserror(p_struct->rmserror),avgerror(p_struct->avgerror),avgrelerror(p_struct->avgrelerror) +{ +} -OUTPUT PARAMETERS: - XYC - new assignment of points to centers are stored - in [Idx0,Idx1) - XYDist2 - squared distances from points to their centers are - stored in [Idx0,Idx1) +knnreport& knnreport::operator=(const knnreport &rhs) +{ + if( this==&rhs ) + return *this; + _knnreport_owner::operator=(rhs); + return *this; +} - -- ALGLIB -- - Copyright 21.01.2015 by Bochkanov Sergey -*************************************************************************/ -void kmeansupdatedistances(/* Real */ ae_matrix* xy, - ae_int_t idx0, - ae_int_t idx1, - ae_int_t nvars, - /* Real */ ae_matrix* ct, - ae_int_t cidx0, - ae_int_t cidx1, - /* Integer */ ae_vector* xyc, - /* Real */ ae_vector* xydist2, - ae_shared_pool* bufferpool, - ae_state *_state) +knnreport::~knnreport() { - ae_frame _frame_block; - ae_int_t i; - ae_int_t i0; - ae_int_t i1; - ae_int_t j; - ae_int_t cclosest; - double dclosest; - double vv; - apbuffers *buf; - ae_smart_ptr _buf; - double rcomplexity; - ae_int_t task0; - ae_int_t task1; - ae_int_t pblkcnt; - ae_int_t cblkcnt; - ae_int_t vblkcnt; - ae_int_t pblk; - ae_int_t cblk; - ae_int_t vblk; - ae_int_t p0; - ae_int_t p1; - ae_int_t c0; - ae_int_t c1; - ae_int_t v0; - ae_int_t v1; - double v00; - double v01; - double v10; - double v11; - double vp0; - double vp1; - double vc0; - double vc1; - ae_int_t pcnt; - ae_int_t pcntpadded; - ae_int_t ccnt; - ae_int_t ccntpadded; - ae_int_t offs0; - ae_int_t offs00; - ae_int_t offs01; - ae_int_t offs10; - ae_int_t offs11; - ae_int_t vcnt; - ae_int_t stride; +} - ae_frame_make(_state, &_frame_block); - ae_smart_ptr_init(&_buf, (void**)&buf, _state); - - /* - * Quick exit for special cases - */ - if( idx1<=idx0 ) +/************************************************************************* +This function serializes data structure to string. + +Important properties of s_out: +* it contains alphanumeric characters, dots, underscores, minus signs +* these symbols are grouped into words, which are separated by spaces + and Windows-style (CR+LF) newlines +* although serializer uses spaces and CR+LF as separators, you can + replace any separator character by arbitrary combination of spaces, + tabs, Windows or Unix newlines. It allows flexible reformatting of + the string in case you want to include it into text or XML file. + But you should not insert separators into the middle of the "words" + nor you should change case of letters. +* s_out can be freely moved between 32-bit and 64-bit systems, little + and big endian machines, and so on. You can serialize structure on + 32-bit machine and unserialize it on 64-bit one (or vice versa), or + serialize it on SPARC and unserialize on x86. You can also + serialize it in C++ version of ALGLIB and unserialize in C# one, + and vice versa. 
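A round-trip through the string serializer might look like the following sketch (the model contents are assumed to come from prior training):

    #include "dataanalysis.h"
    #include <string>
    using namespace alglib;

    void knn_string_roundtrip(knnmodel &model)
    {
        std::string s;
        knnserialize(model, s);          // model -> portable text representation
        knnmodel restored;
        knnunserialize(s, restored);     // text -> equivalent model, possibly on another platform
    }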
+*************************************************************************/ +void knnserialize(knnmodel &obj, std::string &s_out) +{ + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + alglib_impl::ae_int_t ssize; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); return; +#endif } - if( cidx1<=cidx0 ) + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_alloc_start(&serializer); + alglib_impl::knnalloc(&serializer, obj.c_ptr(), &state); + ssize = alglib_impl::ae_serializer_get_alloc_size(&serializer); + s_out.clear(); + s_out.reserve((size_t)(ssize+1)); + alglib_impl::ae_serializer_sstart_str(&serializer, &s_out); + alglib_impl::knnserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_assert( s_out.length()<=(size_t)ssize, "ALGLIB: serialization integrity error", &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); +} +/************************************************************************* +This function unserializes data structure from string. +*************************************************************************/ +void knnunserialize(const std::string &s_in, knnmodel &obj) +{ + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); return; +#endif } - if( nvars<=0 ) + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_ustart_str(&serializer, &s_in); + alglib_impl::knnunserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); +} + + +/************************************************************************* +This function serializes data structure to C++ stream. + +Data stream generated by this function is same as string representation +generated by string version of serializer - alphanumeric characters, +dots, underscores, minus signs, which are grouped into words separated by +spaces and CR+LF. + +We recommend you to read comments on string version of serializer to find +out more about serialization of AlGLIB objects. 
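A comparable sketch for the stream overloads (std::stringstream stands in for any std::ostream/std::istream pair):

    #include "dataanalysis.h"
    #include <sstream>
    using namespace alglib;

    void knn_stream_roundtrip(knnmodel &model)
    {
        std::stringstream ss;
        knnserialize(model, ss);         // writes the same text format to the stream
        knnmodel restored;
        knnunserialize(ss, restored);    // reads it back into a new model object
    }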
+*************************************************************************/ +void knnserialize(knnmodel &obj, std::ostream &s_out) +{ + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); return; +#endif } - - /* - * Try to recursively divide/process dataset - * - * NOTE: real arithmetics is used to avoid integer overflow on large problem sizes - */ - rcomplexity = (double)(idx1-idx0); - rcomplexity = rcomplexity*(cidx1-cidx0); - rcomplexity = rcomplexity*nvars; - if( ((ae_fp_greater_eq(rcomplexity,clustering_parallelcomplexity)&&idx1-idx0>=2*clustering_kmeansblocksize)&&nvars>=clustering_kmeansparalleldim)&&cidx1-cidx0>=clustering_kmeansparallelk ) + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_alloc_start(&serializer); + alglib_impl::knnalloc(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_get_alloc_size(&serializer); // not actually needed, but we have to ask + alglib_impl::ae_serializer_sstart_stream(&serializer, &s_out); + alglib_impl::knnserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); +} +/************************************************************************* +This function unserializes data structure from stream. +*************************************************************************/ +void knnunserialize(const std::istream &s_in, knnmodel &obj) +{ + jmp_buf _break_jump; + alglib_impl::ae_state state; + alglib_impl::ae_serializer serializer; + + alglib_impl::ae_state_init(&state); + if( setjmp(_break_jump) ) { - splitlength(idx1-idx0, clustering_kmeansblocksize, &task0, &task1, _state); - kmeansupdatedistances(xy, idx0, idx0+task0, nvars, ct, cidx0, cidx1, xyc, xydist2, bufferpool, _state); - kmeansupdatedistances(xy, idx0+task0, idx1, nvars, ct, cidx0, cidx1, xyc, xydist2, bufferpool, _state); - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(state.error_msg); return; +#endif } - - /* - * Dataset chunk is selected. 
- * - * Process it with blocked algorithm: - * * iterate over points, process them in KMeansBlockSize-ed chunks - * * for each chunk of dataset, iterate over centers, process them in KMeansBlockSize-ed chunks - * * for each chunk of dataset/centerset, iterate over variables, process them in KMeansBlockSize-ed chunks - */ - ae_assert(clustering_kmeansblocksize%2==0, "KMeansUpdateDistances: internal error", _state); - ae_shared_pool_retrieve(bufferpool, &_buf, _state); - rvectorsetlengthatleast(&buf->ra0, clustering_kmeansblocksize*clustering_kmeansblocksize, _state); - rvectorsetlengthatleast(&buf->ra1, clustering_kmeansblocksize*clustering_kmeansblocksize, _state); - rvectorsetlengthatleast(&buf->ra2, clustering_kmeansblocksize*clustering_kmeansblocksize, _state); - rvectorsetlengthatleast(&buf->ra3, clustering_kmeansblocksize, _state); - ivectorsetlengthatleast(&buf->ia3, clustering_kmeansblocksize, _state); - pblkcnt = chunkscount(idx1-idx0, clustering_kmeansblocksize, _state); - cblkcnt = chunkscount(cidx1-cidx0, clustering_kmeansblocksize, _state); - vblkcnt = chunkscount(nvars, clustering_kmeansblocksize, _state); - for(pblk=0; pblk<=pblkcnt-1; pblk++) + ae_state_set_break_jump(&state, &_break_jump); + alglib_impl::ae_serializer_init(&serializer); + alglib_impl::ae_serializer_ustart_stream(&serializer, &s_in); + alglib_impl::knnunserialize(&serializer, obj.c_ptr(), &state); + alglib_impl::ae_serializer_stop(&serializer, &state); + alglib_impl::ae_serializer_clear(&serializer); + alglib_impl::ae_state_clear(&state); +} + +/************************************************************************* +This function creates buffer structure which can be used to perform +parallel KNN requests. + +KNN subpackage provides two sets of computing functions - ones which use +internal buffer of KNN model (these functions are single-threaded because +they use same buffer, which can not shared between threads), and ones +which use external buffer. + +This function is used to initialize external buffer. + +INPUT PARAMETERS + Model - KNN model which is associated with newly created buffer + +OUTPUT PARAMETERS + Buf - external buffer. + + +IMPORTANT: buffer object should be used only with model which was used to + initialize buffer. Any attempt to use buffer with different + object is dangerous - you may get integrity check failure + (exception) because sizes of internal arrays do not fit to + dimensions of the model structure. + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +void knncreatebuffer(const knnmodel &model, knnbuffer &buf, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Process PBlk-th chunk of dataset. - */ - p0 = idx0+pblk*clustering_kmeansblocksize; - p1 = ae_minint(p0+clustering_kmeansblocksize, idx1, _state); - - /* - * Prepare RA3[]/IA3[] for storage of best distances and best cluster numbers. - */ - for(i=0; i<=clustering_kmeansblocksize-1; i++) - { - buf->ra3.ptr.p_double[i] = ae_maxrealnumber; - buf->ia3.ptr.p_int[i] = -1; - } - - /* - * Iterare over chunks of centerset. 
- */ - for(cblk=0; cblk<=cblkcnt-1; cblk++) - { - - /* - * Process CBlk-th chunk of centerset - */ - c0 = cidx0+cblk*clustering_kmeansblocksize; - c1 = ae_minint(c0+clustering_kmeansblocksize, cidx1, _state); - - /* - * At this point we have to calculate a set of pairwise distances - * between points [P0,P1) and centers [C0,C1) and select best center - * for each point. It can also be done with blocked algorithm - * (blocking for variables). - * - * Following arrays are used: - * * RA0[] - matrix of distances, padded by zeros for even size, - * rows are stored with stride KMeansBlockSize. - * * RA1[] - matrix of points (variables corresponding to current - * block are extracted), padded by zeros for even size, - * rows are stored with stride KMeansBlockSize. - * * RA2[] - matrix of centers (variables corresponding to current - * block are extracted), padded by zeros for even size, - * rows are stored with stride KMeansBlockSize. - * - */ - pcnt = p1-p0; - pcntpadded = pcnt+pcnt%2; - ccnt = c1-c0; - ccntpadded = ccnt+ccnt%2; - stride = clustering_kmeansblocksize; - ae_assert(pcntpadded<=clustering_kmeansblocksize, "KMeansUpdateDistances: integrity error", _state); - ae_assert(ccntpadded<=clustering_kmeansblocksize, "KMeansUpdateDistances: integrity error", _state); - for(i=0; i<=pcntpadded-1; i++) - { - for(j=0; j<=ccntpadded-1; j++) - { - buf->ra0.ptr.p_double[i*stride+j] = 0.0; - } - } - for(vblk=0; vblk<=vblkcnt-1; vblk++) - { - - /* - * Fetch VBlk-th block of variables to arrays RA1 (points) and RA2 (centers). - * Pad points and centers with zeros. - */ - v0 = vblk*clustering_kmeansblocksize; - v1 = ae_minint(v0+clustering_kmeansblocksize, nvars, _state); - vcnt = v1-v0; - for(i=0; i<=pcnt-1; i++) - { - for(j=0; j<=vcnt-1; j++) - { - buf->ra1.ptr.p_double[i*stride+j] = xy->ptr.pp_double[p0+i][v0+j]; - } - } - for(i=pcnt; i<=pcntpadded-1; i++) - { - for(j=0; j<=vcnt-1; j++) - { - buf->ra1.ptr.p_double[i*stride+j] = 0.0; - } - } - for(i=0; i<=ccnt-1; i++) - { - for(j=0; j<=vcnt-1; j++) - { - buf->ra2.ptr.p_double[i*stride+j] = ct->ptr.pp_double[c0+i][v0+j]; - } - } - for(i=ccnt; i<=ccntpadded-1; i++) - { - for(j=0; j<=vcnt-1; j++) - { - buf->ra2.ptr.p_double[i*stride+j] = 0.0; - } - } - - /* - * Update distance matrix with sums-of-squared-differences of RA1 and RA2 - */ - i0 = 0; - while(i0ra0.ptr.p_double[offs0]; - v01 = buf->ra0.ptr.p_double[offs0+1]; - v10 = buf->ra0.ptr.p_double[offs0+stride]; - v11 = buf->ra0.ptr.p_double[offs0+stride+1]; - offs00 = i0*stride; - offs01 = offs00+stride; - offs10 = i1*stride; - offs11 = offs10+stride; - for(j=0; j<=vcnt-1; j++) - { - vp0 = buf->ra1.ptr.p_double[offs00+j]; - vp1 = buf->ra1.ptr.p_double[offs01+j]; - vc0 = buf->ra2.ptr.p_double[offs10+j]; - vc1 = buf->ra2.ptr.p_double[offs11+j]; - vv = vp0-vc0; - v00 = v00+vv*vv; - vv = vp0-vc1; - v01 = v01+vv*vv; - vv = vp1-vc0; - v10 = v10+vv*vv; - vv = vp1-vc1; - v11 = v11+vv*vv; - } - offs0 = i0*stride+i1; - buf->ra0.ptr.p_double[offs0] = v00; - buf->ra0.ptr.p_double[offs0+1] = v01; - buf->ra0.ptr.p_double[offs0+stride] = v10; - buf->ra0.ptr.p_double[offs0+stride+1] = v11; - i1 = i1+2; - } - i0 = i0+2; - } - } - for(i=0; i<=pcnt-1; i++) - { - cclosest = buf->ia3.ptr.p_int[i]; - dclosest = buf->ra3.ptr.p_double[i]; - for(j=0; j<=ccnt-1; j++) - { - if( ae_fp_less(buf->ra0.ptr.p_double[i*stride+j],dclosest) ) - { - dclosest = buf->ra0.ptr.p_double[i*stride+j]; - cclosest = c0+j; - } - } - buf->ia3.ptr.p_int[i] = cclosest; - buf->ra3.ptr.p_double[i] = dclosest; - } - } - - /* - * Store best centers to 
XYC[] - */ - for(i=p0; i<=p1-1; i++) - { - xyc->ptr.p_int[i] = buf->ia3.ptr.p_int[i-p0]; - xydist2->ptr.p_double[i] = buf->ra3.ptr.p_double[i-p0]; - } +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } - ae_shared_pool_recycle(bufferpool, &_buf, _state); - ae_frame_leave(_state); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knncreatebuffer(const_cast(model.c_ptr()), const_cast(buf.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This function selects initial centers according to specified initialization -algorithm. +This subroutine creates KNNBuilder object which is used to train KNN models. -IMPORTANT: this function provides no guarantees regarding selection of - DIFFERENT centers. Centers returned by this function may - include duplicates (say, when random sampling is used). It is - also possible that some centers are empty. - Algorithm which uses this function must be able to deal with it. - Say, you may want to use FixCenters() in order to fix empty centers. +By default, new builder stores empty dataset and some reasonable default +settings. At the very least, you should specify dataset prior to building +KNN model. You can also tweak settings of the model construction algorithm +(recommended, although default settings should work well). + +Following actions are mandatory: +* calling knnbuildersetdataset() to specify dataset +* calling knnbuilderbuildknnmodel() to build KNN model using current + dataset and default settings + +Additionally, you may call: +* knnbuildersetnorm() to change norm being used INPUT PARAMETERS: - XY - dataset, array [0..NPoints-1,0..NVars-1]. - NPoints - points count - NVars - number of variables, NVars>=1 - InitAlgo - initialization algorithm: - * 0 - automatic selection of best algorithm - * 1 - random selection - * 2 - k-means++ - * 3 - fast-greedy init - *-1 - first K rows of dataset are used (debug algorithm) - K - number of centers, K>=1 - CT - possibly preallocated output buffer, resized if needed - InitBuf - internal buffer, possibly unitialized instance of - APBuffers. It is recommended to use this instance only - with SelectInitialCenters() and FixCenters() functions, - because these functions may allocate really large storage. - UpdatePool - shared pool seeded with instance of APBuffers structure - (seed instance can be unitialized). Used internally with - KMeansUpdateDistances() function. It is recommended - to use this pool ONLY with KMeansUpdateDistances() - function. 
+ none OUTPUT PARAMETERS: - CT - set of K clusters, one per row - -RESULT: - True on success, False on failure (impossible to create K independent clusters) + S - KNN builder -- ALGLIB -- - Copyright 21.01.2015 by Bochkanov Sergey + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -static void clustering_selectinitialcenters(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t initalgo, - ae_int_t k, - /* Real */ ae_matrix* ct, - apbuffers* initbuf, - ae_shared_pool* updatepool, - ae_state *_state) +void knnbuildercreate(knnbuilder &s, const xparams _xparams) { - ae_frame _frame_block; - ae_int_t cidx; - ae_int_t i; - ae_int_t j; - double v; - double vv; - double s; - ae_int_t lastnz; - ae_int_t ptidx; - ae_int_t samplesize; - ae_int_t samplescntnew; - ae_int_t samplescntall; - double samplescale; - hqrndstate rs; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knnbuildercreate(const_cast(s.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} - ae_frame_make(_state, &_frame_block); - _hqrndstate_init(&rs, _state); +/************************************************************************* +Specifies regression problem (one or more continuous output variables are +predicted). There also exists "classification" version of this function. + +This subroutine adds dense dataset to the internal storage of the builder +object. Specifying your dataset in the dense format means that the dense +version of the KNN construction algorithm will be invoked. 
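A minimal sketch of the mandatory builder workflow described in knnbuildercreate() above (create the builder, attach a dataset, build the model), assuming the public alglib C++ interface from dataanalysis.h with its usual default xparams arguments; the header path and dataset values are illustrative assumptions:

#include "dataanalysis.h"
using namespace alglib;

int main()
{
    // 4 points, NVars=2 independent variables, NOut=1 dependent variable per row
    real_2d_array xy = "[[0,0,1],[1,0,2],[0,1,3],[1,1,4]]";

    knnbuilder builder;
    knnbuildercreate(builder);                      // mandatory: create builder with default settings
    knnbuildersetdatasetreg(builder, xy, 4, 2, 1);  // mandatory: specify dense regression dataset

    knnmodel  model;
    knnreport rep;
    knnbuilderbuildknnmodel(builder, 3, 0.0, model, rep);  // K=3 neighbors, Eps=0 => exact search
    return 0;
}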
+ +INPUT PARAMETERS: + S - KNN builder object + XY - array[NPoints,NVars+NOut] (note: actual size can be + larger, only leading part is used anyway), dataset: + * first NVars elements of each row store values of the + independent variables + * next NOut elements store values of the dependent + variables + NPoints - number of rows in the dataset, NPoints>=1 + NVars - number of independent variables, NVars>=1 + NOut - number of dependent variables, NOut>=1 - hqrndrandomize(&rs, _state); - - /* - * Check parameters - */ - ae_assert(npoints>0, "SelectInitialCenters: internal error", _state); - ae_assert(nvars>0, "SelectInitialCenters: internal error", _state); - ae_assert(k>0, "SelectInitialCenters: internal error", _state); - if( initalgo==0 ) +OUTPUT PARAMETERS: + S - KNN builder + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +void knnbuildersetdatasetreg(const knnbuilder &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nout, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - initalgo = 3; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif } - rmatrixsetlengthatleast(ct, k, nvars, _state); - - /* - * Random initialization - */ - if( initalgo==-1 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knnbuildersetdatasetreg(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, nvars, nout, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +Specifies classification problem (two or more classes are predicted). +There also exists "regression" version of this function. + +This subroutine adds dense dataset to the internal storage of the builder +object. Specifying your dataset in the dense format means that the dense +version of the KNN construction algorithm will be invoked. 
+ +INPUT PARAMETERS: + S - KNN builder object + XY - array[NPoints,NVars+1] (note: actual size can be + larger, only leading part is used anyway), dataset: + * first NVars elements of each row store values of the + independent variables + * next element stores class index, in [0,NClasses) + NPoints - number of rows in the dataset, NPoints>=1 + NVars - number of independent variables, NVars>=1 + NClasses - number of classes, NClasses>=2 + +OUTPUT PARAMETERS: + S - KNN builder + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +void knnbuildersetdatasetcls(const knnbuilder &s, const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t nclasses, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - for(i=0; i<=k-1; i++) - { - ae_v_move(&ct->ptr.pp_double[i][0], 1, &xy->ptr.pp_double[i%npoints][0], 1, ae_v_len(0,nvars-1)); - } - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * Random initialization - */ - if( initalgo==1 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knnbuildersetdatasetcls(const_cast(s.c_ptr()), const_cast(xy.c_ptr()), npoints, nvars, nclasses, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This function sets norm type used for neighbor search. + +INPUT PARAMETERS: + S - decision forest builder object + NormType - norm type: + * 0 inf-norm + * 1 1-norm + * 2 Euclidean norm (default) + +OUTPUT PARAMETERS: + S - decision forest builder + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +void knnbuildersetnorm(const knnbuilder &s, const ae_int_t nrmtype, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - for(i=0; i<=k-1; i++) - { - j = hqrnduniformi(&rs, npoints, _state); - ae_v_move(&ct->ptr.pp_double[i][0], 1, &xy->ptr.pp_double[j][0], 1, ae_v_len(0,nvars-1)); - } - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * k-means++ initialization - */ - if( initalgo==2 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knnbuildersetnorm(const_cast(s.c_ptr()), nrmtype, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +This subroutine builds KNN model according to current settings, using +dataset internally stored in the builder object. 
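Before moving on to model construction, a sketch of the classification dataset layout and norm selection provided by knnbuildersetdatasetcls() and knnbuildersetnorm() above; the header, sample values and NormType choice are illustrative assumptions:

#include "dataanalysis.h"
using namespace alglib;

int main()
{
    // 4 points, NVars=2 features; the last column stores the class index in [0,NClasses)
    real_2d_array xy = "[[0.0,0.1,0],[0.2,0.0,0],[5.0,5.2,1],[5.1,4.9,1]]";

    knnbuilder builder;
    knnbuildercreate(builder);
    knnbuildersetdatasetcls(builder, xy, 4, 2, 2);  // NPoints=4, NVars=2, NClasses=2
    knnbuildersetnorm(builder, 0);                  // 0 = inf-norm (2, the Euclidean norm, is the default)

    knnmodel  model;
    knnreport rep;
    knnbuilderbuildknnmodel(builder, 1, 0.0, model, rep);  // plain 1-NN classifier
    return 0;
}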
+ +The model being built performs inference using Eps-approximate K nearest +neighbors search algorithm, with: +* K=1, Eps=0 corresponding to the "nearest neighbor algorithm" +* K>1, Eps=0 corresponding to the "K nearest neighbors algorithm" +* K>=1, Eps>0 corresponding to "approximate nearest neighbors algorithm" + +An approximate KNN is a good option for high-dimensional datasets (exact +KNN works slowly when dimensions count grows). + +An ALGLIB implementation of kd-trees is used to perform k-nn searches. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + S - KNN builder object + K - number of neighbors to search for, K>=1 + Eps - approximation factor: + * Eps=0 means that exact kNN search is performed + * Eps>0 means that (1+Eps)-approximate search is performed + +OUTPUT PARAMETERS: + Model - KNN model + Rep - report + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +void knnbuilderbuildknnmodel(const knnbuilder &s, const ae_int_t k, const double eps, knnmodel &model, knnreport &rep, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Prepare distances array. - * Select initial center at random. - */ - rvectorsetlengthatleast(&initbuf->ra0, npoints, _state); - for(i=0; i<=npoints-1; i++) - { - initbuf->ra0.ptr.p_double[i] = ae_maxrealnumber; - } - ptidx = hqrnduniformi(&rs, npoints, _state); - ae_v_move(&ct->ptr.pp_double[0][0], 1, &xy->ptr.pp_double[ptidx][0], 1, ae_v_len(0,nvars-1)); - - /* - * For each newly added center repeat: - * * reevaluate distances from points to best centers - * * sample points with probability dependent on distance - * * add new center - */ - for(cidx=0; cidx<=k-2; cidx++) - { - - /* - * Reevaluate distances - */ - s = 0.0; - for(i=0; i<=npoints-1; i++) - { - v = 0.0; - for(j=0; j<=nvars-1; j++) - { - vv = xy->ptr.pp_double[i][j]-ct->ptr.pp_double[cidx][j]; - v = v+vv*vv; - } - if( ae_fp_less(v,initbuf->ra0.ptr.p_double[i]) ) - { - initbuf->ra0.ptr.p_double[i] = v; - } - s = s+initbuf->ra0.ptr.p_double[i]; - } - - /* - * If all distances are zero, it means that we can not find enough - * distinct points. In this case we just select non-distinct center - * at random and continue iterations. This issue will be handled - * later in the FixCenters() function. - */ - if( ae_fp_eq(s,0.0) ) - { - ptidx = hqrnduniformi(&rs, npoints, _state); - ae_v_move(&ct->ptr.pp_double[cidx+1][0], 1, &xy->ptr.pp_double[ptidx][0], 1, ae_v_len(0,nvars-1)); - continue; - } - - /* - * Select point as center using its distance. - * We also handle situation when because of rounding errors - * no point was selected - in this case, last non-zero one - * will be used. 
- */ - v = hqrnduniformr(&rs, _state); - vv = 0.0; - lastnz = -1; - ptidx = -1; - for(i=0; i<=npoints-1; i++) - { - if( ae_fp_eq(initbuf->ra0.ptr.p_double[i],0.0) ) - { - continue; - } - lastnz = i; - vv = vv+initbuf->ra0.ptr.p_double[i]; - if( ae_fp_less_eq(v,vv/s) ) - { - ptidx = i; - break; - } - } - ae_assert(lastnz>=0, "SelectInitialCenters: integrity error", _state); - if( ptidx<0 ) - { - ptidx = lastnz; - } - ae_v_move(&ct->ptr.pp_double[cidx+1][0], 1, &xy->ptr.pp_double[ptidx][0], 1, ae_v_len(0,nvars-1)); - } - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * "Fast-greedy" algorithm based on "Scalable k-means++". - * - * We perform several rounds, within each round we sample about 0.5*K points - * (not exactly 0.5*K) until we have 2*K points sampled. Before each round - * we calculate distances from dataset points to closest points sampled so far. - * We sample dataset points independently using distance xtimes 0.5*K divided by total - * as probability (similar to k-means++, but each point is sampled independently; - * after each round we have roughtly 0.5*K points added to sample). - * - * After sampling is done, we run "greedy" version of k-means++ on this subsample - * which selects most distant point on every round. - */ - if( initalgo==3 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knnbuilderbuildknnmodel(const_cast(s.c_ptr()), k, eps, const_cast(model.c_ptr()), const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +Changing search settings of KNN model. + +K and EPS parameters of KNN (AKNN) search are specified during model +construction. However, plain KNN algorithm with Euclidean distance allows +you to change them at any moment. + +NOTE: future versions of KNN model may support advanced versions of KNN, + such as NCA or LMNN. It is possible that such algorithms won't allow + you to change search settings on the fly. If you call this function + for an algorithm which does not support on-the-fly changes, it will + throw an exception. + +INPUT PARAMETERS: + Model - KNN model + K - K>=1, neighbors count + EPS - accuracy of the EPS-approximate NN search. Set to 0.0, if + you want to perform "classic" KNN search. Specify larger + values if you need to speed-up high-dimensional KNN + queries. + +OUTPUT PARAMETERS: + nothing on success, exception on failure + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +void knnrewritekeps(const knnmodel &model, const ae_int_t k, const double eps, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Prepare arrays. 
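To illustrate the K/Eps combinations listed for knnbuilderbuildknnmodel() above, a sketch assuming a builder that already holds a dataset (as in the earlier examples); the function name is an assumed helper:

#include "dataanalysis.h"
using namespace alglib;

// Build three models from the same builder to show the documented K/Eps modes.
void build_variants(const knnbuilder &builder)
{
    knnmodel  m1, m5, m5approx;
    knnreport rep;
    knnbuilderbuildknnmodel(builder, 1, 0.0, m1, rep);        // K=1, Eps=0: nearest neighbor
    knnbuilderbuildknnmodel(builder, 5, 0.0, m5, rep);        // K=5, Eps=0: exact 5-NN
    knnbuilderbuildknnmodel(builder, 5, 0.5, m5approx, rep);  // K=5, Eps=0.5: (1+Eps)-approximate 5-NN
}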
- * Select initial center at random, add it to "new" part of sample, - * which is stored at the beginning of the array - */ - samplesize = 2*k; - samplescale = 0.5*k; - rmatrixsetlengthatleast(&initbuf->rm0, samplesize, nvars, _state); - ptidx = hqrnduniformi(&rs, npoints, _state); - ae_v_move(&initbuf->rm0.ptr.pp_double[0][0], 1, &xy->ptr.pp_double[ptidx][0], 1, ae_v_len(0,nvars-1)); - samplescntnew = 1; - samplescntall = 1; - rvectorsetlengthatleast(&initbuf->ra0, npoints, _state); - rvectorsetlengthatleast(&initbuf->ra1, npoints, _state); - ivectorsetlengthatleast(&initbuf->ia1, npoints, _state); - for(i=0; i<=npoints-1; i++) - { - initbuf->ra0.ptr.p_double[i] = ae_maxrealnumber; - } - - /* - * Repeat until samples count is 2*K - */ - while(samplescntallrm0, samplescntall-samplescntnew, samplescntall, &initbuf->ia1, &initbuf->ra1, updatepool, _state); - samplescntnew = 0; - - /* - * Merge new distances with old ones. - * Calculate sum of distances, if sum is exactly zero - fill sample - * by randomly selected points and terminate. - */ - s = 0.0; - for(i=0; i<=npoints-1; i++) - { - initbuf->ra0.ptr.p_double[i] = ae_minreal(initbuf->ra0.ptr.p_double[i], initbuf->ra1.ptr.p_double[i], _state); - s = s+initbuf->ra0.ptr.p_double[i]; - } - if( ae_fp_eq(s,0.0) ) - { - while(samplescntallrm0.ptr.pp_double[samplescntall][0], 1, &xy->ptr.pp_double[ptidx][0], 1, ae_v_len(0,nvars-1)); - inc(&samplescntall, _state); - inc(&samplescntnew, _state); - } - break; - } - - /* - * Sample points independently. - */ - for(i=0; i<=npoints-1; i++) - { - if( samplescntall==samplesize ) - { - break; - } - if( ae_fp_eq(initbuf->ra0.ptr.p_double[i],0.0) ) - { - continue; - } - if( ae_fp_less_eq(hqrnduniformr(&rs, _state),samplescale*initbuf->ra0.ptr.p_double[i]/s) ) - { - ae_v_move(&initbuf->rm0.ptr.pp_double[samplescntall][0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,nvars-1)); - inc(&samplescntall, _state); - inc(&samplescntnew, _state); - } - } - } - - /* - * Run greedy version of k-means on sampled points - */ - rvectorsetlengthatleast(&initbuf->ra0, samplescntall, _state); - for(i=0; i<=samplescntall-1; i++) - { - initbuf->ra0.ptr.p_double[i] = ae_maxrealnumber; - } - ptidx = hqrnduniformi(&rs, samplescntall, _state); - ae_v_move(&ct->ptr.pp_double[0][0], 1, &initbuf->rm0.ptr.pp_double[ptidx][0], 1, ae_v_len(0,nvars-1)); - for(cidx=0; cidx<=k-2; cidx++) - { - - /* - * Reevaluate distances - */ - for(i=0; i<=samplescntall-1; i++) - { - v = 0.0; - for(j=0; j<=nvars-1; j++) - { - vv = initbuf->rm0.ptr.pp_double[i][j]-ct->ptr.pp_double[cidx][j]; - v = v+vv*vv; - } - if( ae_fp_less(v,initbuf->ra0.ptr.p_double[i]) ) - { - initbuf->ra0.ptr.p_double[i] = v; - } - } - - /* - * Select point as center in greedy manner - most distant - * point is selected. 
- */ - ptidx = 0; - for(i=0; i<=samplescntall-1; i++) - { - if( ae_fp_greater(initbuf->ra0.ptr.p_double[i],initbuf->ra0.ptr.p_double[ptidx]) ) - { - ptidx = i; - } - } - ae_v_move(&ct->ptr.pp_double[cidx+1][0], 1, &initbuf->rm0.ptr.pp_double[ptidx][0], 1, ae_v_len(0,nvars-1)); - } - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - - /* - * Internal error - */ - ae_assert(ae_false, "SelectInitialCenters: internal error", _state); - ae_frame_leave(_state); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knnrewritekeps(const_cast(model.c_ptr()), k, eps, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - /************************************************************************* -This function "fixes" centers, i.e. replaces ones which have no neighbor -points by new centers which have at least one neighbor. If it is impossible -to fix centers (not enough distinct points in the dataset), this function -returns False. +Inference using KNN model. + +See also knnprocess0(), knnprocessi() and knnclassify() for options with a +bit more convenient interface. + +IMPORTANT: this function is thread-unsafe and modifies internal structures + of the model! You can not use same model object for parallel + evaluation from several threads. + + Use knntsprocess() with independent thread-local buffers, if + you need thread-safe evaluation. INPUT PARAMETERS: - XY - dataset, array [0..NPoints-1,0..NVars-1]. - NPoints - points count, >=1 - NVars - number of variables, NVars>=1 - CT - centers - K - number of centers, K>=1 - InitBuf - internal buffer, possibly unitialized instance of - APBuffers. It is recommended to use this instance only - with SelectInitialCenters() and FixCenters() functions, - because these functions may allocate really large storage. - UpdatePool - shared pool seeded with instance of APBuffers structure - (seed instance can be unitialized). Used internally with - KMeansUpdateDistances() function. It is recommended - to use this pool ONLY with KMeansUpdateDistances() - function. + Model - KNN model + X - input vector, array[0..NVars-1]. + Y - possible preallocated buffer. Reused if long enough. OUTPUT PARAMETERS: - CT - set of K centers, one per row - -RESULT: - True on success, False on failure (impossible to create K independent clusters) + Y - result. Regression estimate when solving regression task, + vector of posterior probabilities for classification task. 
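A sketch of adjusting search settings on an already built model with knnrewritekeps(), as described above; the wrapper signature follows the definition earlier in this hunk, the helper name is an assumption:

#include "dataanalysis.h"
using namespace alglib;

// Switch an existing plain-KNN model to a wider, approximate search at run time.
void relax_search(const knnmodel &model)
{
    // K=10 neighbors, Eps=1.0 => (1+Eps)-approximate search; an exception is thrown
    // if the underlying algorithm does not support on-the-fly changes.
    knnrewritekeps(model, 10, 1.0);
}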
-- ALGLIB -- - Copyright 21.01.2015 by Bochkanov Sergey + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -static ae_bool clustering_fixcenters(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - /* Real */ ae_matrix* ct, - ae_int_t k, - apbuffers* initbuf, - ae_shared_pool* updatepool, - ae_state *_state) +void knnprocess(const knnmodel &model, const real_1d_array &x, real_1d_array &y, const xparams _xparams) { - ae_int_t fixiteration; - ae_int_t centertofix; - ae_int_t i; - ae_int_t j; - ae_int_t pdistant; - double ddistant; - double v; - ae_bool result; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knnprocess(const_cast(model.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} +/************************************************************************* +This function returns first component of the inferred vector (i.e. one +with index #0). - ae_assert(npoints>=1, "FixCenters: internal error", _state); - ae_assert(nvars>=1, "FixCenters: internal error", _state); - ae_assert(k>=1, "FixCenters: internal error", _state); - - /* - * Calculate distances from points to best centers (RA0) - * and best center indexes (IA0) - */ - ivectorsetlengthatleast(&initbuf->ia0, npoints, _state); - rvectorsetlengthatleast(&initbuf->ra0, npoints, _state); - kmeansupdatedistances(xy, 0, npoints, nvars, ct, 0, k, &initbuf->ia0, &initbuf->ra0, updatepool, _state); - - /* - * Repeat loop: - * * find first center which has no corresponding point - * * set it to the most distant (from the rest of the centerset) point - * * recalculate distances, update IA0/RA0 - * * repeat - * - * Loop is repeated for at most 2*K iterations. It is stopped once we have - * no "empty" clusters. - */ - bvectorsetlengthatleast(&initbuf->ba0, k, _state); - for(fixiteration=0; fixiteration<=2*k; fixiteration++) +It is a convenience wrapper for knnprocess() intended for either: +* 1-dimensional regression problems +* 2-class classification problems + +In the former case this function returns inference result as scalar, which +is definitely more convenient that wrapping it as vector. In the latter +case it returns probability of object belonging to class #0. + +If you call it for anything different from two cases above, it will work +as defined, i.e. return y[0], although it is of less use in such cases. + +IMPORTANT: this function is thread-unsafe and modifies internal structures + of the model! You can not use same model object for parallel + evaluation from several threads. + + Use knntsprocess() with independent thread-local buffers, if + you need thread-safe evaluation. + +INPUT PARAMETERS: + Model - KNN model + X - input vector, array[0..NVars-1]. 
+ +RESULT: + Y[0] + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +double knnprocess0(const knnmodel &model, const real_1d_array &x, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Select center to fix (one which is not mentioned in IA0), - * terminate if there is no such center. - * BA0[] stores True for centers which have at least one point. - */ - for(i=0; i<=k-1; i++) - { - initbuf->ba0.ptr.p_bool[i] = ae_false; - } - for(i=0; i<=npoints-1; i++) - { - initbuf->ba0.ptr.p_bool[initbuf->ia0.ptr.p_int[i]] = ae_true; - } - centertofix = -1; - for(i=0; i<=k-1; i++) - { - if( !initbuf->ba0.ptr.p_bool[i] ) - { - centertofix = i; - break; - } - } - if( centertofix<0 ) - { - result = ae_true; - return result; - } - - /* - * Replace center to fix by the most distant point. - * Update IA0/RA0 - */ - pdistant = 0; - ddistant = initbuf->ra0.ptr.p_double[pdistant]; - for(i=0; i<=npoints-1; i++) - { - if( ae_fp_greater(initbuf->ra0.ptr.p_double[i],ddistant) ) - { - ddistant = initbuf->ra0.ptr.p_double[i]; - pdistant = i; - } - } - if( ae_fp_eq(ddistant,0.0) ) - { - break; - } - ae_v_move(&ct->ptr.pp_double[centertofix][0], 1, &xy->ptr.pp_double[pdistant][0], 1, ae_v_len(0,nvars-1)); - for(i=0; i<=npoints-1; i++) - { - v = 0.0; - for(j=0; j<=nvars-1; j++) - { - v = v+ae_sqr(xy->ptr.pp_double[i][j]-ct->ptr.pp_double[centertofix][j], _state); - } - if( ae_fp_less(v,initbuf->ra0.ptr.p_double[i]) ) - { - initbuf->ra0.ptr.p_double[i] = v; - initbuf->ia0.ptr.p_int[i] = centertofix; - } - } +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - result = ae_false; - return result; + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::knnprocess0(const_cast(model.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } - /************************************************************************* -This function performs agglomerative hierarchical clustering using -precomputed distance matrix. Internal function, should not be called -directly. +This function returns most probable class number for an input X. It is +same as calling knnprocess(model,x,y), then determining i=argmax(y[i]) and +returning i. + +A class number in [0,NOut) range in returned for classification problems, +-1 is returned when this function is called for regression problems. + +IMPORTANT: this function is thread-unsafe and modifies internal structures + of the model! You can not use same model object for parallel + evaluation from several threads. + + Use knntsprocess() with independent thread-local buffers, if + you need thread-safe evaluation. INPUT PARAMETERS: - S - clusterizer state, initialized by ClusterizerCreate() - D - distance matrix, array[S.NFeatures,S.NFeatures] - Contents of the matrix is destroyed during - algorithm operation. + Model - KNN model + X - input vector, array[0..NVars-1]. -OUTPUT PARAMETERS: - Rep - clustering results; see description of AHCReport - structure for more information. 
+RESULT: + class number, -1 for regression tasks -- ALGLIB -- - Copyright 10.07.2012 by Bochkanov Sergey + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -static void clustering_clusterizerrunahcinternal(clusterizerstate* s, - /* Real */ ae_matrix* d, - ahcreport* rep, - ae_state *_state) +ae_int_t knnclassify(const knnmodel &model, const real_1d_array &x, const xparams _xparams) { - ae_frame _frame_block; - ae_int_t i; - ae_int_t j; - ae_int_t k; - double v; - ae_int_t mergeidx; - ae_int_t c0; - ae_int_t c1; - ae_int_t s0; - ae_int_t s1; - ae_int_t ar; - ae_int_t br; - ae_int_t npoints; - ae_vector cidx; - ae_vector csizes; - ae_vector nnidx; - ae_matrix cinfo; - ae_int_t n0; - ae_int_t n1; - ae_int_t ni; - double d01; + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::ae_int_t result = alglib_impl::knnclassify(const_cast(model.c_ptr()), const_cast(x.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} - ae_frame_make(_state, &_frame_block); - ae_vector_init(&cidx, 0, DT_INT, _state); - ae_vector_init(&csizes, 0, DT_INT, _state); - ae_vector_init(&nnidx, 0, DT_INT, _state); - ae_matrix_init(&cinfo, 0, 0, DT_INT, _state); +/************************************************************************* +'interactive' variant of knnprocess() for languages like Python which +support constructs like "y = knnprocessi(model,x)" and interactive mode of +the interpreter. - npoints = s->npoints; - - /* - * Fill Rep.NPoints, quick exit when NPoints<=1 - */ - rep->npoints = npoints; - if( npoints==0 ) +This function allocates new array on each call, so it is significantly +slower than its 'non-interactive' counterpart, but it is more convenient +when you call it from command line. + +IMPORTANT: this function is thread-unsafe and may modify internal + structures of the model! You can not use same model object for + parallel evaluation from several threads. + + Use knntsprocess() with independent thread-local buffers if + you need thread-safe evaluation. 
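For a two-class model, the convenience wrappers knnprocess0() and knnclassify() above reduce inference to a scalar probability and a class label; a sketch with an assumed query point:

#include "dataanalysis.h"
#include <cstdio>
using namespace alglib;

void classify_point(const knnmodel &model)
{
    real_1d_array x = "[5.05, 5.00]";          // one query point, NVars=2 (illustrative)
    double   p0    = knnprocess0(model, x);    // posterior probability of class #0
    ae_int_t label = knnclassify(model, x);    // argmax over posteriors, -1 for regression models
    printf("P(class 0)=%.3f, label=%d\n", p0, (int)label);
}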
+ + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +void knnprocessi(const knnmodel &model, const real_1d_array &x, real_1d_array &y, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - ae_vector_set_length(&rep->p, 0, _state); - ae_matrix_set_length(&rep->z, 0, 0, _state); - ae_matrix_set_length(&rep->pz, 0, 0, _state); - ae_matrix_set_length(&rep->pm, 0, 0, _state); - ae_vector_set_length(&rep->mergedist, 0, _state); - rep->terminationtype = 1; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - if( npoints==1 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knnprocessi(const_cast(model.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +Thread-safe procesing using external buffer for temporaries. + +This function is thread-safe (i.e . you can use same KNN model from +multiple threads) as long as you use different buffer objects for different +threads. + +INPUT PARAMETERS: + Model - KNN model + Buf - buffer object, must be allocated specifically for this + model with knncreatebuffer(). + X - input vector, array[NVars] + +OUTPUT PARAMETERS: + Y - result, array[NOut]. Regression estimate when solving + regression task, vector of posterior probabilities for + a classification task. 
+ + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +void knntsprocess(const knnmodel &model, const knnbuffer &buf, const real_1d_array &x, real_1d_array &y, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - ae_vector_set_length(&rep->p, 1, _state); - ae_matrix_set_length(&rep->z, 0, 0, _state); - ae_matrix_set_length(&rep->pz, 0, 0, _state); - ae_matrix_set_length(&rep->pm, 0, 0, _state); - ae_vector_set_length(&rep->mergedist, 0, _state); - rep->p.ptr.p_int[0] = 0; - rep->terminationtype = 1; - ae_frame_leave(_state); +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); return; +#endif } - ae_matrix_set_length(&rep->z, npoints-1, 2, _state); - ae_vector_set_length(&rep->mergedist, npoints-1, _state); - rep->terminationtype = 1; - - /* - * Build list of nearest neighbors - */ - ae_vector_set_length(&nnidx, npoints, _state); - for(i=0; i<=npoints-1; i++) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knntsprocess(const_cast(model.c_ptr()), const_cast(buf.c_ptr()), const_cast(x.c_ptr()), const_cast(y.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; +} + +/************************************************************************* +Relative classification error on the test set + +INPUT PARAMETERS: + Model - KNN model + XY - test set + NPoints - test set size + +RESULT: + percent of incorrectly classified cases. + Zero if model solves regression task. + +NOTE: if you need several different kinds of error metrics, it is better + to use knnallerrors() which computes all error metric with just one + pass over dataset. 
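The thread-safe path described for knntsprocess() above pairs a shared model with one knncreatebuffer()-allocated buffer per thread; a sketch of that pattern, with the threading itself omitted for brevity:

#include "dataanalysis.h"
using namespace alglib;

// Each worker owns its own knnbuffer; the model itself is shared read-only.
void worker(const knnmodel &model, const real_1d_array &x, real_1d_array &y)
{
    knnbuffer buf;
    knncreatebuffer(model, buf);     // thread-local temporaries tied to this model
    knntsprocess(model, buf, x, y);  // thread-safe inference using the local buffer
}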
+ + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +double knnrelclserror(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Calculate index of the nearest neighbor - */ - k = -1; - v = ae_maxrealnumber; - for(j=0; j<=npoints-1; j++) - { - if( j!=i&&ae_fp_less(d->ptr.pp_double[i][j],v) ) - { - k = j; - v = d->ptr.pp_double[i][j]; - } - } - ae_assert(ae_fp_less(v,ae_maxrealnumber), "ClusterizerRunAHC: internal error", _state); - nnidx.ptr.p_int[i] = k; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - - /* - * For AHCAlgo=4 (Ward's method) replace distances by their squares times 0.5 - */ - if( s->ahcalgo==4 ) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::knnrelclserror(const_cast(model.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +Average cross-entropy (in bits per element) on the test set + +INPUT PARAMETERS: + Model - KNN model + XY - test set + NPoints - test set size + +RESULT: + CrossEntropy/NPoints. + Zero if model solves regression task. + +NOTE: the cross-entropy metric is too unstable when used to evaluate KNN + models (such models can report exactly zero probabilities), so we + do not recommend using it. + +NOTE: if you need several different kinds of error metrics, it is better + to use knnallerrors() which computes all error metric with just one + pass over dataset. + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +double knnavgce(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - for(i=0; i<=npoints-1; i++) - { - for(j=0; j<=npoints-1; j++) - { - d->ptr.pp_double[i][j] = 0.5*d->ptr.pp_double[i][j]*d->ptr.pp_double[i][j]; - } - } +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - - /* - * Distance matrix is built, perform merges. - * - * NOTE 1: CIdx is array[NPoints] which maps rows/columns of the - * distance matrix D to indexes of clusters. Values of CIdx - * from [0,NPoints) denote single-point clusters, and values - * from [NPoints,2*NPoints-1) denote ones obtained by merging - * smaller clusters. Negative calues correspond to absent clusters. - * - * Initially it contains [0...NPoints-1], after each merge - * one element of CIdx (one with index C0) is replaced by - * NPoints+MergeIdx, and another one with index C1 is - * rewritten by -1. - * - * NOTE 2: CSizes is array[NPoints] which stores sizes of clusters. 
- * - */ - ae_vector_set_length(&cidx, npoints, _state); - ae_vector_set_length(&csizes, npoints, _state); - for(i=0; i<=npoints-1; i++) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::knnavgce(const_cast(model.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +RMS error on the test set. + +Its meaning for regression task is obvious. As for classification problems, +RMS error means error when estimating posterior probabilities. + +INPUT PARAMETERS: + Model - KNN model + XY - test set + NPoints - test set size + +RESULT: + root mean square error. + +NOTE: if you need several different kinds of error metrics, it is better + to use knnallerrors() which computes all error metric with just one + pass over dataset. + + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +double knnrmserror(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - cidx.ptr.p_int[i] = i; - csizes.ptr.p_int[i] = 1; +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - for(mergeidx=0; mergeidx<=npoints-2; mergeidx++) + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::knnrmserror(const_cast(model.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); +} + +/************************************************************************* +Average error on the test set + +Its meaning for regression task is obvious. As for classification problems, +average error means error when estimating posterior probabilities. + +INPUT PARAMETERS: + Model - KNN model + XY - test set + NPoints - test set size + +RESULT: + average error + +NOTE: if you need several different kinds of error metrics, it is better + to use knnallerrors() which computes all error metric with just one + pass over dataset. 
+ + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +double knnavgerror(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) +{ + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - - /* - * Select pair of clusters (C0,C1) with CIdx[C0]=0 ) - { - if( ae_fp_less(d->ptr.pp_double[i][nnidx.ptr.p_int[i]],d01) ) - { - c0 = i; - c1 = nnidx.ptr.p_int[i]; - d01 = d->ptr.pp_double[i][nnidx.ptr.p_int[i]]; - } - } - } - ae_assert(ae_fp_less(d01,ae_maxrealnumber), "ClusterizerRunAHC: internal error", _state); - if( cidx.ptr.p_int[c0]>cidx.ptr.p_int[c1] ) - { - i = c1; - c1 = c0; - c0 = i; - } - - /* - * Fill one row of Rep.Z and one element of Rep.MergeDist - */ - rep->z.ptr.pp_int[mergeidx][0] = cidx.ptr.p_int[c0]; - rep->z.ptr.pp_int[mergeidx][1] = cidx.ptr.p_int[c1]; - rep->mergedist.ptr.p_double[mergeidx] = d01; - - /* - * Update distance matrix: - * * row/column C0 are updated by distances to the new cluster - * * row/column C1 are considered empty (we can fill them by zeros, - * but do not want to spend time - we just ignore them) - * - * NOTE: it is important to update distance matrix BEFORE CIdx/CSizes - * are updated. - */ - ae_assert((((s->ahcalgo==0||s->ahcalgo==1)||s->ahcalgo==2)||s->ahcalgo==3)||s->ahcalgo==4, "ClusterizerRunAHC: internal error", _state); - for(i=0; i<=npoints-1; i++) - { - if( i!=c0&&i!=c1 ) - { - n0 = csizes.ptr.p_int[c0]; - n1 = csizes.ptr.p_int[c1]; - ni = csizes.ptr.p_int[i]; - if( s->ahcalgo==0 ) - { - d->ptr.pp_double[i][c0] = ae_maxreal(d->ptr.pp_double[i][c0], d->ptr.pp_double[i][c1], _state); - } - if( s->ahcalgo==1 ) - { - d->ptr.pp_double[i][c0] = ae_minreal(d->ptr.pp_double[i][c0], d->ptr.pp_double[i][c1], _state); - } - if( s->ahcalgo==2 ) - { - d->ptr.pp_double[i][c0] = (csizes.ptr.p_int[c0]*d->ptr.pp_double[i][c0]+csizes.ptr.p_int[c1]*d->ptr.pp_double[i][c1])/(csizes.ptr.p_int[c0]+csizes.ptr.p_int[c1]); - } - if( s->ahcalgo==3 ) - { - d->ptr.pp_double[i][c0] = (d->ptr.pp_double[i][c0]+d->ptr.pp_double[i][c1])/2; - } - if( s->ahcalgo==4 ) - { - d->ptr.pp_double[i][c0] = ((n0+ni)*d->ptr.pp_double[i][c0]+(n1+ni)*d->ptr.pp_double[i][c1]-ni*d01)/(n0+n1+ni); - } - d->ptr.pp_double[c0][i] = d->ptr.pp_double[i][c0]; - } - } - - /* - * Update CIdx and CSizes - */ - cidx.ptr.p_int[c0] = npoints+mergeidx; - cidx.ptr.p_int[c1] = -1; - csizes.ptr.p_int[c0] = csizes.ptr.p_int[c0]+csizes.ptr.p_int[c1]; - csizes.ptr.p_int[c1] = 0; - - /* - * Update nearest neighbors array: - * * update nearest neighbors of everything except for C0/C1 - * * update neighbors of C0/C1 - */ - for(i=0; i<=npoints-1; i++) - { - if( (cidx.ptr.p_int[i]>=0&&i!=c0)&&(nnidx.ptr.p_int[i]==c0||nnidx.ptr.p_int[i]==c1) ) - { - - /* - * I-th cluster which is distinct from C0/C1 has former C0/C1 cluster as its nearest - * neighbor. We handle this issue depending on specific AHC algorithm being used. - */ - if( s->ahcalgo==1 ) - { - - /* - * Single linkage. Merging of two clusters together - * does NOT change distances between new cluster and - * other clusters. - * - * The only thing we have to do is to update nearest neighbor index - */ - nnidx.ptr.p_int[i] = c0; - } - else - { - - /* - * Something other than single linkage. We have to re-examine - * all the row to find nearest neighbor. 
- */ - k = -1; - v = ae_maxrealnumber; - for(j=0; j<=npoints-1; j++) - { - if( (cidx.ptr.p_int[j]>=0&&j!=i)&&ae_fp_less(d->ptr.pp_double[i][j],v) ) - { - k = j; - v = d->ptr.pp_double[i][j]; - } - } - ae_assert(ae_fp_less(v,ae_maxrealnumber)||mergeidx==npoints-2, "ClusterizerRunAHC: internal error", _state); - nnidx.ptr.p_int[i] = k; - } - } - } - k = -1; - v = ae_maxrealnumber; - for(j=0; j<=npoints-1; j++) - { - if( (cidx.ptr.p_int[j]>=0&&j!=c0)&&ae_fp_less(d->ptr.pp_double[c0][j],v) ) - { - k = j; - v = d->ptr.pp_double[c0][j]; - } - } - ae_assert(ae_fp_less(v,ae_maxrealnumber)||mergeidx==npoints-2, "ClusterizerRunAHC: internal error", _state); - nnidx.ptr.p_int[c0] = k; - } - - /* - * Calculate Rep.P and Rep.PM. - * - * In order to do that, we fill CInfo matrix - (2*NPoints-1)*3 matrix, - * with I-th row containing: - * * CInfo[I,0] - size of I-th cluster - * * CInfo[I,1] - beginning of I-th cluster - * * CInfo[I,2] - end of I-th cluster - * * CInfo[I,3] - height of I-th cluster - * - * We perform it as follows: - * * first NPoints clusters have unit size (CInfo[I,0]=1) and zero - * height (CInfo[I,3]=0) - * * we replay NPoints-1 merges from first to last and fill sizes of - * corresponding clusters (new size is a sum of sizes of clusters - * being merged) and height (new height is max(heights)+1). - * * now we ready to determine locations of clusters. Last cluster - * spans entire dataset, we know it. We replay merges from last to - * first, during each merge we already know location of the merge - * result, and we can position first cluster to the left part of - * the result, and second cluster to the right part. - */ - ae_vector_set_length(&rep->p, npoints, _state); - ae_matrix_set_length(&rep->pm, npoints-1, 6, _state); - ae_matrix_set_length(&cinfo, 2*npoints-1, 4, _state); - for(i=0; i<=npoints-1; i++) - { - cinfo.ptr.pp_int[i][0] = 1; - cinfo.ptr.pp_int[i][3] = 0; - } - for(i=0; i<=npoints-2; i++) - { - cinfo.ptr.pp_int[npoints+i][0] = cinfo.ptr.pp_int[rep->z.ptr.pp_int[i][0]][0]+cinfo.ptr.pp_int[rep->z.ptr.pp_int[i][1]][0]; - cinfo.ptr.pp_int[npoints+i][3] = ae_maxint(cinfo.ptr.pp_int[rep->z.ptr.pp_int[i][0]][3], cinfo.ptr.pp_int[rep->z.ptr.pp_int[i][1]][3], _state)+1; - } - cinfo.ptr.pp_int[2*npoints-2][1] = 0; - cinfo.ptr.pp_int[2*npoints-2][2] = npoints-1; - for(i=npoints-2; i>=0; i--) - { - - /* - * We merge C0 which spans [A0,B0] and C1 (spans [A1,B1]), - * with unknown A0, B0, A1, B1. However, we know that result - * is CR, which spans [AR,BR] with known AR/BR, and we know - * sizes of C0, C1, CR (denotes as S0, S1, SR). 
- */ - c0 = rep->z.ptr.pp_int[i][0]; - c1 = rep->z.ptr.pp_int[i][1]; - s0 = cinfo.ptr.pp_int[c0][0]; - s1 = cinfo.ptr.pp_int[c1][0]; - ar = cinfo.ptr.pp_int[npoints+i][1]; - br = cinfo.ptr.pp_int[npoints+i][2]; - cinfo.ptr.pp_int[c0][1] = ar; - cinfo.ptr.pp_int[c0][2] = ar+s0-1; - cinfo.ptr.pp_int[c1][1] = br-(s1-1); - cinfo.ptr.pp_int[c1][2] = br; - rep->pm.ptr.pp_int[i][0] = cinfo.ptr.pp_int[c0][1]; - rep->pm.ptr.pp_int[i][1] = cinfo.ptr.pp_int[c0][2]; - rep->pm.ptr.pp_int[i][2] = cinfo.ptr.pp_int[c1][1]; - rep->pm.ptr.pp_int[i][3] = cinfo.ptr.pp_int[c1][2]; - rep->pm.ptr.pp_int[i][4] = cinfo.ptr.pp_int[c0][3]; - rep->pm.ptr.pp_int[i][5] = cinfo.ptr.pp_int[c1][3]; - } - for(i=0; i<=npoints-1; i++) - { - ae_assert(cinfo.ptr.pp_int[i][1]==cinfo.ptr.pp_int[i][2], "Assertion failed", _state); - rep->p.ptr.p_int[i] = cinfo.ptr.pp_int[i][1]; - } - - /* - * Calculate Rep.PZ - */ - ae_matrix_set_length(&rep->pz, npoints-1, 2, _state); - for(i=0; i<=npoints-2; i++) - { - rep->pz.ptr.pp_int[i][0] = rep->z.ptr.pp_int[i][0]; - rep->pz.ptr.pp_int[i][1] = rep->z.ptr.pp_int[i][1]; - if( rep->pz.ptr.pp_int[i][0]pz.ptr.pp_int[i][0] = rep->p.ptr.p_int[rep->pz.ptr.pp_int[i][0]]; - } - if( rep->pz.ptr.pp_int[i][1]pz.ptr.pp_int[i][1] = rep->p.ptr.p_int[rep->pz.ptr.pp_int[i][1]]; - } +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } - ae_frame_leave(_state); + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::knnavgerror(const_cast(model.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } - /************************************************************************* -This function recursively evaluates distance matrix for SOME (not all!) -distance types. +Average relative error on the test set + +Its meaning for regression task is obvious. As for classification problems, +average relative error means error when estimating posterior probabilities. INPUT PARAMETERS: - XY - array[?,NFeatures], dataset - NFeatures- number of features, >=1 - DistType- distance function: - * 0 Chebyshev distance (L-inf norm) - * 1 city block distance (L1 norm) - D - preallocated output matrix - I0,I1 - half interval of rows to calculate: [I0,I1) is processed - J0,J1 - half interval of cols to calculate: [J0,J1) is processed + Model - KNN model + XY - test set + NPoints - test set size -OUTPUT PARAMETERS: - D - array[NPoints,NPoints], distance matrix - upper triangle and main diagonal are initialized with - data. +RESULT: + average relative error -NOTE: intersection of [I0,I1) and [J0,J1) may completely lie in upper - triangle, only partially intersect with it, or have zero intersection. - In any case, only intersection of submatrix given by [I0,I1)*[J0,J1) - with upper triangle of the matrix is evaluated. - - Say, for 4x4 distance matrix A: - * [0,2)*[0,2) will result in evaluation of A00, A01, A11 - * [2,4)*[2,4) will result in evaluation of A22, A23, A32, A33 - * [2,4)*[0,2) will result in evaluation of empty set of elements - +NOTE: if you need several different kinds of error metrics, it is better + to use knnallerrors() which computes all error metric with just one + pass over dataset. 
-- ALGLIB -- - Copyright 07.04.2013 by Bochkanov Sergey + Copyright 15.02.2019 by Bochkanov Sergey *************************************************************************/ -static void clustering_evaluatedistancematrixrec(/* Real */ ae_matrix* xy, - ae_int_t nfeatures, - ae_int_t disttype, - /* Real */ ae_matrix* d, - ae_int_t i0, - ae_int_t i1, - ae_int_t j0, - ae_int_t j1, - ae_state *_state) +double knnavgrelerror(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, const xparams _xparams) { - double rcomplexity; - ae_int_t len0; - ae_int_t len1; - ae_int_t i; - ae_int_t j; - ae_int_t k; - double v; - double vv; - - - ae_assert(disttype==0||disttype==1, "EvaluateDistanceMatrixRec: incorrect DistType", _state); - - /* - * Normalize J0/J1: - * * J0:=max(J0,I0) - we ignore lower triangle - * * J1:=max(J1,J0) - normalize J1 - */ - j0 = ae_maxint(j0, i0, _state); - j1 = ae_maxint(j1, j0, _state); - if( j1<=j0||i1<=i0 ) - { - return; - } - - /* - * Try to process in parallel. Two condtions must hold in order to - * activate parallel processing: - * 1. I1-I0>2 or J1-J0>2 - * 2. (I1-I0)*(J1-J0)*NFeatures>=ParallelComplexity - * - * NOTE: all quantities are converted to reals in order to avoid - * integer overflow during multiplication - * - * NOTE: strict inequality in (1) is necessary to reduce task to 2x2 - * basecases. In future versions we will be able to handle such - * basecases more efficiently than 1x1 cases. - */ - rcomplexity = (double)(i1-i0); - rcomplexity = rcomplexity*(j1-j0); - rcomplexity = rcomplexity*nfeatures; - if( ae_fp_greater_eq(rcomplexity,clustering_parallelcomplexity)&&(i1-i0>2||j1-j0>2) ) - { - - /* - * Recursive division along largest of dimensions - */ - if( i1-i0>j1-j0 ) - { - splitlengtheven(i1-i0, &len0, &len1, _state); - clustering_evaluatedistancematrixrec(xy, nfeatures, disttype, d, i0, i0+len0, j0, j1, _state); - clustering_evaluatedistancematrixrec(xy, nfeatures, disttype, d, i0+len0, i1, j0, j1, _state); - } - else - { - splitlengtheven(j1-j0, &len0, &len1, _state); - clustering_evaluatedistancematrixrec(xy, nfeatures, disttype, d, i0, i1, j0, j0+len0, _state); - clustering_evaluatedistancematrixrec(xy, nfeatures, disttype, d, i0, i1, j0+len0, j1, _state); - } - return; - } - - /* - * Sequential processing - */ - for(i=i0; i<=i1-1; i++) + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) { - for(j=j0; j<=j1-1; j++) - { - if( j>=i ) - { - v = 0.0; - if( disttype==0 ) - { - for(k=0; k<=nfeatures-1; k++) - { - vv = xy->ptr.pp_double[i][k]-xy->ptr.pp_double[j][k]; - if( ae_fp_less(vv,(double)(0)) ) - { - vv = -vv; - } - if( ae_fp_greater(vv,v) ) - { - v = vv; - } - } - } - if( disttype==1 ) - { - for(k=0; k<=nfeatures-1; k++) - { - vv = xy->ptr.pp_double[i][k]-xy->ptr.pp_double[j][k]; - if( ae_fp_less(vv,(double)(0)) ) - { - vv = -vv; - } - v = v+vv; - } - } - d->ptr.pp_double[i][j] = v; - } - } +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return 0; +#endif } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + double result = alglib_impl::knnavgrelerror(const_cast(model.c_ptr()), const_cast(xy.c_ptr()), npoints, &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return *(reinterpret_cast(&result)); } 
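A sketch of evaluating a trained model on a held-out set with the individual error metrics above; as the notes say, knnallerrors() (below) is preferable when several metrics are needed. The helper name and test-set variable are assumptions:

#include "dataanalysis.h"
#include <cstdio>
using namespace alglib;

void report_errors(const knnmodel &model, const real_2d_array &xytest, ae_int_t ntest)
{
    double rms = knnrmserror(model, xytest, ntest);     // RMS error on the test set
    double avg = knnavgerror(model, xytest, ntest);     // average error
    double rel = knnavgrelerror(model, xytest, ntest);  // average relative error
    printf("rms=%.4f avg=%.4f avgrel=%.4f\n", rms, avg, rel);
}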
+/************************************************************************* +Calculates all kinds of errors for the model in one call. -void _kmeansbuffers_init(void* _p, ae_state *_state) -{ - kmeansbuffers *p = (kmeansbuffers*)_p; - ae_touch_ptr((void*)p); - ae_matrix_init(&p->ct, 0, 0, DT_REAL, _state); - ae_matrix_init(&p->ctbest, 0, 0, DT_REAL, _state); - ae_vector_init(&p->xycbest, 0, DT_INT, _state); - ae_vector_init(&p->xycprev, 0, DT_INT, _state); - ae_vector_init(&p->d2, 0, DT_REAL, _state); - ae_vector_init(&p->csizes, 0, DT_INT, _state); - _apbuffers_init(&p->initbuf, _state); - ae_shared_pool_init(&p->updatepool, _state); -} - - -void _kmeansbuffers_init_copy(void* _dst, void* _src, ae_state *_state) -{ - kmeansbuffers *dst = (kmeansbuffers*)_dst; - kmeansbuffers *src = (kmeansbuffers*)_src; - ae_matrix_init_copy(&dst->ct, &src->ct, _state); - ae_matrix_init_copy(&dst->ctbest, &src->ctbest, _state); - ae_vector_init_copy(&dst->xycbest, &src->xycbest, _state); - ae_vector_init_copy(&dst->xycprev, &src->xycprev, _state); - ae_vector_init_copy(&dst->d2, &src->d2, _state); - ae_vector_init_copy(&dst->csizes, &src->csizes, _state); - _apbuffers_init_copy(&dst->initbuf, &src->initbuf, _state); - ae_shared_pool_init_copy(&dst->updatepool, &src->updatepool, _state); -} - - -void _kmeansbuffers_clear(void* _p) -{ - kmeansbuffers *p = (kmeansbuffers*)_p; - ae_touch_ptr((void*)p); - ae_matrix_clear(&p->ct); - ae_matrix_clear(&p->ctbest); - ae_vector_clear(&p->xycbest); - ae_vector_clear(&p->xycprev); - ae_vector_clear(&p->d2); - ae_vector_clear(&p->csizes); - _apbuffers_clear(&p->initbuf); - ae_shared_pool_clear(&p->updatepool); -} - - -void _kmeansbuffers_destroy(void* _p) -{ - kmeansbuffers *p = (kmeansbuffers*)_p; - ae_touch_ptr((void*)p); - ae_matrix_destroy(&p->ct); - ae_matrix_destroy(&p->ctbest); - ae_vector_destroy(&p->xycbest); - ae_vector_destroy(&p->xycprev); - ae_vector_destroy(&p->d2); - ae_vector_destroy(&p->csizes); - _apbuffers_destroy(&p->initbuf); - ae_shared_pool_destroy(&p->updatepool); -} - - -void _clusterizerstate_init(void* _p, ae_state *_state) -{ - clusterizerstate *p = (clusterizerstate*)_p; - ae_touch_ptr((void*)p); - ae_matrix_init(&p->xy, 0, 0, DT_REAL, _state); - ae_matrix_init(&p->d, 0, 0, DT_REAL, _state); - ae_matrix_init(&p->tmpd, 0, 0, DT_REAL, _state); - _apbuffers_init(&p->distbuf, _state); - _kmeansbuffers_init(&p->kmeanstmp, _state); -} - - -void _clusterizerstate_init_copy(void* _dst, void* _src, ae_state *_state) -{ - clusterizerstate *dst = (clusterizerstate*)_dst; - clusterizerstate *src = (clusterizerstate*)_src; - dst->npoints = src->npoints; - dst->nfeatures = src->nfeatures; - dst->disttype = src->disttype; - ae_matrix_init_copy(&dst->xy, &src->xy, _state); - ae_matrix_init_copy(&dst->d, &src->d, _state); - dst->ahcalgo = src->ahcalgo; - dst->kmeansrestarts = src->kmeansrestarts; - dst->kmeansmaxits = src->kmeansmaxits; - dst->kmeansinitalgo = src->kmeansinitalgo; - dst->kmeansdbgnoits = src->kmeansdbgnoits; - ae_matrix_init_copy(&dst->tmpd, &src->tmpd, _state); - _apbuffers_init_copy(&dst->distbuf, &src->distbuf, _state); - _kmeansbuffers_init_copy(&dst->kmeanstmp, &src->kmeanstmp, _state); -} - +INPUT PARAMETERS: + Model - KNN model + XY - test set: + * one row per point + * first NVars columns store independent variables + * depending on problem type: + * next column stores class number in [0,NClasses) - for + classification problems + * next NOut columns store dependent variables - for + regression problems + NPoints - test set size, 
NPoints>=0 -void _clusterizerstate_clear(void* _p) -{ - clusterizerstate *p = (clusterizerstate*)_p; - ae_touch_ptr((void*)p); - ae_matrix_clear(&p->xy); - ae_matrix_clear(&p->d); - ae_matrix_clear(&p->tmpd); - _apbuffers_clear(&p->distbuf); - _kmeansbuffers_clear(&p->kmeanstmp); -} +OUTPUT PARAMETERS: + Rep - following fields are loaded with errors for both regression + and classification models: + * rep.rmserror - RMS error for the output + * rep.avgerror - average error + * rep.avgrelerror - average relative error + following fields are set only for classification models, + zero for regression ones: + * relclserror - relative classification error, in [0,1] + * avgce - average cross-entropy in bits per dataset entry +NOTE: the cross-entropy metric is too unstable when used to evaluate KNN + models (such models can report exactly zero probabilities), so we + do not recommend using it. -void _clusterizerstate_destroy(void* _p) + -- ALGLIB -- + Copyright 15.02.2019 by Bochkanov Sergey +*************************************************************************/ +void knnallerrors(const knnmodel &model, const real_2d_array &xy, const ae_int_t npoints, knnreport &rep, const xparams _xparams) { - clusterizerstate *p = (clusterizerstate*)_p; - ae_touch_ptr((void*)p); - ae_matrix_destroy(&p->xy); - ae_matrix_destroy(&p->d); - ae_matrix_destroy(&p->tmpd); - _apbuffers_destroy(&p->distbuf); - _kmeansbuffers_destroy(&p->kmeanstmp); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::knnallerrors(const_cast(model.c_ptr()), const_cast(xy.c_ptr()), npoints, const_cast(rep.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } +#endif +#if defined(AE_COMPILE_DATACOMP) || !defined(AE_PARTIAL_BUILD) +/************************************************************************* +k-means++ clusterization. +Backward compatibility function, we recommend to use CLUSTERING subpackage +as better replacement. 
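For completeness, a sketch of the single-pass error computation with knnallerrors() defined above, reading the report fields its documentation lists (the classification-only fields stay zero for regression models); the helper name is an assumption:

#include "dataanalysis.h"
#include <cstdio>
using namespace alglib;

void report_all_errors(const knnmodel &model, const real_2d_array &xytest, ae_int_t ntest)
{
    knnreport rep;
    knnallerrors(model, xytest, ntest, rep);   // one pass over the test set
    printf("rms=%.4f avg=%.4f avgrel=%.4f relcls=%.4f\n",
           rep.rmserror, rep.avgerror, rep.avgrelerror, rep.relclserror);
}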
-void _ahcreport_init(void* _p, ae_state *_state) + -- ALGLIB -- + Copyright 21.03.2009 by Bochkanov Sergey +*************************************************************************/ +void kmeansgenerate(const real_2d_array &xy, const ae_int_t npoints, const ae_int_t nvars, const ae_int_t k, const ae_int_t restarts, ae_int_t &info, real_2d_array &c, integer_1d_array &xyc, const xparams _xparams) { - ahcreport *p = (ahcreport*)_p; - ae_touch_ptr((void*)p); - ae_vector_init(&p->p, 0, DT_INT, _state); - ae_matrix_init(&p->z, 0, 0, DT_INT, _state); - ae_matrix_init(&p->pz, 0, 0, DT_INT, _state); - ae_matrix_init(&p->pm, 0, 0, DT_INT, _state); - ae_vector_init(&p->mergedist, 0, DT_REAL, _state); + jmp_buf _break_jump; + alglib_impl::ae_state _alglib_env_state; + alglib_impl::ae_state_init(&_alglib_env_state); + if( setjmp(_break_jump) ) + { +#if !defined(AE_NO_EXCEPTIONS) + _ALGLIB_CPP_EXCEPTION(_alglib_env_state.error_msg); +#else + _ALGLIB_SET_ERROR_FLAG(_alglib_env_state.error_msg); + return; +#endif + } + ae_state_set_break_jump(&_alglib_env_state, &_break_jump); + if( _xparams.flags!=0x0 ) + ae_state_set_flags(&_alglib_env_state, _xparams.flags); + alglib_impl::kmeansgenerate(const_cast(xy.c_ptr()), npoints, nvars, k, restarts, &info, const_cast(c.c_ptr()), const_cast(xyc.c_ptr()), &_alglib_env_state); + alglib_impl::ae_state_clear(&_alglib_env_state); + return; } - - -void _ahcreport_init_copy(void* _dst, void* _src, ae_state *_state) -{ - ahcreport *dst = (ahcreport*)_dst; - ahcreport *src = (ahcreport*)_src; - dst->terminationtype = src->terminationtype; - dst->npoints = src->npoints; - ae_vector_init_copy(&dst->p, &src->p, _state); - ae_matrix_init_copy(&dst->z, &src->z, _state); - ae_matrix_init_copy(&dst->pz, &src->pz, _state); - ae_matrix_init_copy(&dst->pm, &src->pm, _state); - ae_vector_init_copy(&dst->mergedist, &src->mergedist, _state); +#endif } - -void _ahcreport_clear(void* _p) +///////////////////////////////////////////////////////////////////////// +// +// THIS SECTION CONTAINS IMPLEMENTATION OF COMPUTATIONAL CORE +// +///////////////////////////////////////////////////////////////////////// +namespace alglib_impl { - ahcreport *p = (ahcreport*)_p; - ae_touch_ptr((void*)p); - ae_vector_clear(&p->p); - ae_matrix_clear(&p->z); - ae_matrix_clear(&p->pz); - ae_matrix_clear(&p->pm); - ae_vector_clear(&p->mergedist); -} +#if defined(AE_COMPILE_PCA) || !defined(AE_PARTIAL_BUILD) -void _ahcreport_destroy(void* _p) -{ - ahcreport *p = (ahcreport*)_p; - ae_touch_ptr((void*)p); - ae_vector_destroy(&p->p); - ae_matrix_destroy(&p->z); - ae_matrix_destroy(&p->pz); - ae_matrix_destroy(&p->pm); - ae_vector_destroy(&p->mergedist); -} +#endif +#if defined(AE_COMPILE_BDSS) || !defined(AE_PARTIAL_BUILD) +static double bdss_xlny(double x, double y, ae_state *_state); +static double bdss_getcv(/* Integer */ ae_vector* cnt, + ae_int_t nc, + ae_state *_state); +static void bdss_tieaddc(/* Integer */ ae_vector* c, + /* Integer */ ae_vector* ties, + ae_int_t ntie, + ae_int_t nc, + /* Integer */ ae_vector* cnt, + ae_state *_state); +static void bdss_tiesubc(/* Integer */ ae_vector* c, + /* Integer */ ae_vector* ties, + ae_int_t ntie, + ae_int_t nc, + /* Integer */ ae_vector* cnt, + ae_state *_state); -void _kmeansreport_init(void* _p, ae_state *_state) -{ - kmeansreport *p = (kmeansreport*)_p; - ae_touch_ptr((void*)p); - ae_matrix_init(&p->c, 0, 0, DT_REAL, _state); - ae_vector_init(&p->cidx, 0, DT_INT, _state); -} - - -void _kmeansreport_init_copy(void* _dst, void* _src, ae_state *_state) -{ - 
kmeansreport *dst = (kmeansreport*)_dst; - kmeansreport *src = (kmeansreport*)_src; - dst->npoints = src->npoints; - dst->nfeatures = src->nfeatures; - dst->terminationtype = src->terminationtype; - dst->iterationscount = src->iterationscount; - dst->energy = src->energy; - dst->k = src->k; - ae_matrix_init_copy(&dst->c, &src->c, _state); - ae_vector_init_copy(&dst->cidx, &src->cidx, _state); -} - - -void _kmeansreport_clear(void* _p) -{ - kmeansreport *p = (kmeansreport*)_p; - ae_touch_ptr((void*)p); - ae_matrix_clear(&p->c); - ae_vector_clear(&p->cidx); -} - +#endif +#if defined(AE_COMPILE_MLPBASE) || !defined(AE_PARTIAL_BUILD) +static ae_int_t mlpbase_mlpvnum = 7; +static ae_int_t mlpbase_mlpfirstversion = 0; +static ae_int_t mlpbase_nfieldwidth = 4; +static ae_int_t mlpbase_hlconnfieldwidth = 5; +static ae_int_t mlpbase_hlnfieldwidth = 4; +static ae_int_t mlpbase_gradbasecasecost = 50000; +static ae_int_t mlpbase_microbatchsize = 64; +static void mlpbase_addinputlayer(ae_int_t ncount, + /* Integer */ ae_vector* lsizes, + /* Integer */ ae_vector* ltypes, + /* Integer */ ae_vector* lconnfirst, + /* Integer */ ae_vector* lconnlast, + ae_int_t* lastproc, + ae_state *_state); +static void mlpbase_addbiasedsummatorlayer(ae_int_t ncount, + /* Integer */ ae_vector* lsizes, + /* Integer */ ae_vector* ltypes, + /* Integer */ ae_vector* lconnfirst, + /* Integer */ ae_vector* lconnlast, + ae_int_t* lastproc, + ae_state *_state); +static void mlpbase_addactivationlayer(ae_int_t functype, + /* Integer */ ae_vector* lsizes, + /* Integer */ ae_vector* ltypes, + /* Integer */ ae_vector* lconnfirst, + /* Integer */ ae_vector* lconnlast, + ae_int_t* lastproc, + ae_state *_state); +static void mlpbase_addzerolayer(/* Integer */ ae_vector* lsizes, + /* Integer */ ae_vector* ltypes, + /* Integer */ ae_vector* lconnfirst, + /* Integer */ ae_vector* lconnlast, + ae_int_t* lastproc, + ae_state *_state); +static void mlpbase_hladdinputlayer(multilayerperceptron* network, + ae_int_t* connidx, + ae_int_t* neuroidx, + ae_int_t* structinfoidx, + ae_int_t nin, + ae_state *_state); +static void mlpbase_hladdoutputlayer(multilayerperceptron* network, + ae_int_t* connidx, + ae_int_t* neuroidx, + ae_int_t* structinfoidx, + ae_int_t* weightsidx, + ae_int_t k, + ae_int_t nprev, + ae_int_t nout, + ae_bool iscls, + ae_bool islinearout, + ae_state *_state); +static void mlpbase_hladdhiddenlayer(multilayerperceptron* network, + ae_int_t* connidx, + ae_int_t* neuroidx, + ae_int_t* structinfoidx, + ae_int_t* weightsidx, + ae_int_t k, + ae_int_t nprev, + ae_int_t ncur, + ae_state *_state); +static void mlpbase_fillhighlevelinformation(multilayerperceptron* network, + ae_int_t nin, + ae_int_t nhid1, + ae_int_t nhid2, + ae_int_t nout, + ae_bool iscls, + ae_bool islinearout, + ae_state *_state); +static void mlpbase_mlpcreate(ae_int_t nin, + ae_int_t nout, + /* Integer */ ae_vector* lsizes, + /* Integer */ ae_vector* ltypes, + /* Integer */ ae_vector* lconnfirst, + /* Integer */ ae_vector* lconnlast, + ae_int_t layerscount, + ae_bool isclsnet, + multilayerperceptron* network, + ae_state *_state); +static void mlpbase_mlphessianbatchinternal(multilayerperceptron* network, + /* Real */ ae_matrix* xy, + ae_int_t ssize, + ae_bool naturalerr, + double* e, + /* Real */ ae_vector* grad, + /* Real */ ae_matrix* h, + ae_state *_state); +static void mlpbase_mlpinternalcalculategradient(multilayerperceptron* network, + /* Real */ ae_vector* neurons, + /* Real */ ae_vector* weights, + /* Real */ ae_vector* derror, + /* Real */ ae_vector* grad, 
+ ae_bool naturalerrorfunc, + ae_state *_state); +static void mlpbase_mlpchunkedgradient(multilayerperceptron* network, + /* Real */ ae_matrix* xy, + ae_int_t cstart, + ae_int_t csize, + /* Real */ ae_vector* batch4buf, + /* Real */ ae_vector* hpcbuf, + double* e, + ae_bool naturalerrorfunc, + ae_state *_state); +static void mlpbase_mlpchunkedprocess(multilayerperceptron* network, + /* Real */ ae_matrix* xy, + ae_int_t cstart, + ae_int_t csize, + /* Real */ ae_vector* batch4buf, + /* Real */ ae_vector* hpcbuf, + ae_state *_state); +static double mlpbase_safecrossentropy(double t, + double z, + ae_state *_state); +static void mlpbase_randomizebackwardpass(multilayerperceptron* network, + ae_int_t neuronidx, + double v, + ae_state *_state); -void _kmeansreport_destroy(void* _p) -{ - kmeansreport *p = (kmeansreport*)_p; - ae_touch_ptr((void*)p); - ae_matrix_destroy(&p->c); - ae_vector_destroy(&p->cidx); -} +#endif +#if defined(AE_COMPILE_LDA) || !defined(AE_PARTIAL_BUILD) +#endif +#if defined(AE_COMPILE_SSA) || !defined(AE_PARTIAL_BUILD) +static ae_bool ssa_hassomethingtoanalyze(ssamodel* s, ae_state *_state); +static ae_bool ssa_issequencebigenough(ssamodel* s, + ae_int_t i, + ae_state *_state); +static void ssa_updatebasis(ssamodel* s, + ae_int_t appendlen, + double updateits, + ae_state *_state); +static void ssa_analyzesequence(ssamodel* s, + /* Real */ ae_vector* data, + ae_int_t i0, + ae_int_t i1, + /* Real */ ae_vector* trend, + /* Real */ ae_vector* noise, + ae_int_t offs, + ae_state *_state); +static void ssa_forecastavgsequence(ssamodel* s, + /* Real */ ae_vector* data, + ae_int_t i0, + ae_int_t i1, + ae_int_t m, + ae_int_t forecastlen, + ae_bool smooth, + /* Real */ ae_vector* trend, + ae_int_t offs, + ae_state *_state); +static void ssa_realtimedequeue(ssamodel* s, + double beta, + ae_int_t cnt, + ae_state *_state); +static void ssa_updatexxtprepare(ssamodel* s, + ae_int_t updatesize, + ae_int_t windowwidth, + ae_int_t memorylimit, + ae_state *_state); +static void ssa_updatexxtsend(ssamodel* s, + /* Real */ ae_vector* u, + ae_int_t i0, + /* Real */ ae_matrix* xxt, + ae_state *_state); +static void ssa_updatexxtfinalize(ssamodel* s, + /* Real */ ae_matrix* xxt, + ae_state *_state); -/************************************************************************* -k-means++ clusterization. -Backward compatibility function, we recommend to use CLUSTERING subpackage -as better replacement. 
- -- ALGLIB -- - Copyright 21.03.2009 by Bochkanov Sergey -*************************************************************************/ -void kmeansgenerate(/* Real */ ae_matrix* xy, +#endif +#if defined(AE_COMPILE_LINREG) || !defined(AE_PARTIAL_BUILD) +static ae_int_t linreg_lrvnum = 5; +static void linreg_lrinternal(/* Real */ ae_matrix* xy, + /* Real */ ae_vector* s, ae_int_t npoints, ae_int_t nvars, - ae_int_t k, - ae_int_t restarts, ae_int_t* info, - /* Real */ ae_matrix* c, - /* Integer */ ae_vector* xyc, - ae_state *_state) -{ - ae_frame _frame_block; - ae_matrix dummy; - ae_int_t itscnt; - double e; - kmeansbuffers buf; - - ae_frame_make(_state, &_frame_block); - *info = 0; - ae_matrix_clear(c); - ae_vector_clear(xyc); - ae_matrix_init(&dummy, 0, 0, DT_REAL, _state); - _kmeansbuffers_init(&buf, _state); - - kmeansinitbuf(&buf, _state); - kmeansgenerateinternal(xy, npoints, nvars, k, 0, 0, restarts, ae_false, info, &itscnt, c, ae_true, &dummy, ae_false, xyc, &e, &buf, _state); - ae_frame_leave(_state); -} - - - + linearmodel* lm, + lrreport* ar, + ae_state *_state); -/************************************************************************* -This subroutine builds random decision forest. -INPUT PARAMETERS: - XY - training set - NPoints - training set size, NPoints>=1 - NVars - number of independent variables, NVars>=1 - NClasses - task type: - * NClasses=1 - regression task with one - dependent variable - * NClasses>1 - classification task with - NClasses classes. - NTrees - number of trees in a forest, NTrees>=1. - recommended values: 50-100. - R - percent of a training set used to build - individual trees. 01). - * 1, if task has been solved - DF - model built - Rep - training report, contains error on a training set - and out-of-bag estimates of generalization error. 
- -- ALGLIB -- - Copyright 19.02.2009 by Bochkanov Sergey -*************************************************************************/ -void dfbuildrandomdecisionforest(/* Real */ ae_matrix* xy, +#endif +#if defined(AE_COMPILE_LOGIT) || !defined(AE_PARTIAL_BUILD) +static double logit_xtol = 100*ae_machineepsilon; +static double logit_ftol = 0.0001; +static double logit_gtol = 0.3; +static ae_int_t logit_maxfev = 20; +static double logit_stpmin = 1.0E-2; +static double logit_stpmax = 1.0E5; +static ae_int_t logit_logitvnum = 6; +static void logit_mnliexp(/* Real */ ae_vector* w, + /* Real */ ae_vector* x, + ae_state *_state); +static void logit_mnlallerrors(logitmodel* lm, + /* Real */ ae_matrix* xy, ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, - ae_int_t ntrees, - double r, + double* relcls, + double* avgce, + double* rms, + double* avg, + double* avgrel, + ae_state *_state); +static void logit_mnlmcsrch(ae_int_t n, + /* Real */ ae_vector* x, + double* f, + /* Real */ ae_vector* g, + /* Real */ ae_vector* s, + double* stp, ae_int_t* info, - decisionforest* df, - dfreport* rep, - ae_state *_state) -{ - ae_int_t samplesize; - - *info = 0; - _decisionforest_clear(df); - _dfreport_clear(rep); + ae_int_t* nfev, + /* Real */ ae_vector* wa, + logitmcstate* state, + ae_int_t* stage, + ae_state *_state); +static void logit_mnlmcstep(double* stx, + double* fx, + double* dx, + double* sty, + double* fy, + double* dy, + double* stp, + double fp, + double dp, + ae_bool* brackt, + double stmin, + double stmax, + ae_int_t* info, + ae_state *_state); - if( ae_fp_less_eq(r,(double)(0))||ae_fp_greater(r,(double)(1)) ) - { - *info = -1; - return; - } - samplesize = ae_maxint(ae_round(r*npoints, _state), 1, _state); - dfbuildinternal(xy, npoints, nvars, nclasses, ntrees, samplesize, ae_maxint(nvars/2, 1, _state), dforest_dfusestrongsplits+dforest_dfuseevs, info, df, rep, _state); -} +#endif +#if defined(AE_COMPILE_MCPD) || !defined(AE_PARTIAL_BUILD) +static double mcpd_xtol = 1.0E-8; +static void mcpd_mcpdinit(ae_int_t n, + ae_int_t entrystate, + ae_int_t exitstate, + mcpdstate* s, + ae_state *_state); -/************************************************************************* -This subroutine builds random decision forest. -This function gives ability to tune number of variables used when choosing -best split. -INPUT PARAMETERS: - XY - training set - NPoints - training set size, NPoints>=1 - NVars - number of independent variables, NVars>=1 - NClasses - task type: - * NClasses=1 - regression task with one - dependent variable - * NClasses>1 - classification task with - NClasses classes. - NTrees - number of trees in a forest, NTrees>=1. - recommended values: 50-100. - NRndVars - number of variables used when choosing best split - R - percent of a training set used to build - individual trees. 01). - * 1, if task has been solved - DF - model built - Rep - training report, contains error on a training set - and out-of-bag estimates of generalization error. 
- -- ALGLIB -- - Copyright 19.02.2009 by Bochkanov Sergey -*************************************************************************/ -void dfbuildrandomdecisionforestx1(/* Real */ ae_matrix* xy, +#endif +#if defined(AE_COMPILE_MLPTRAIN) || !defined(AE_PARTIAL_BUILD) +static double mlptrain_mindecay = 0.001; +static ae_int_t mlptrain_defaultlbfgsfactor = 6; +static void mlptrain_mlpkfoldcvgeneral(multilayerperceptron* n, + /* Real */ ae_matrix* xy, + ae_int_t npoints, + double decay, + ae_int_t restarts, + ae_int_t foldscount, + ae_bool lmalgorithm, + double wstep, + ae_int_t maxits, + ae_int_t* info, + mlpreport* rep, + mlpcvreport* cvrep, + ae_state *_state); +static void mlptrain_mlpkfoldsplit(/* Real */ ae_matrix* xy, ae_int_t npoints, - ae_int_t nvars, ae_int_t nclasses, - ae_int_t ntrees, - ae_int_t nrndvars, - double r, + ae_int_t foldscount, + ae_bool stratifiedsplits, + /* Integer */ ae_vector* folds, + ae_state *_state); +static void mlptrain_mthreadcv(mlptrainer* s, + ae_int_t rowsize, + ae_int_t nrestarts, + /* Integer */ ae_vector* folds, + ae_int_t fold, + ae_int_t dfold, + /* Real */ ae_matrix* cvy, + ae_shared_pool* pooldatacv, + ae_int_t wcount, + ae_state *_state); +ae_bool _trypexec_mlptrain_mthreadcv(mlptrainer* s, + ae_int_t rowsize, + ae_int_t nrestarts, + /* Integer */ ae_vector* folds, + ae_int_t fold, + ae_int_t dfold, + /* Real */ ae_matrix* cvy, + ae_shared_pool* pooldatacv, + ae_int_t wcount, ae_state *_state); +static void mlptrain_mlptrainnetworkx(mlptrainer* s, + ae_int_t nrestarts, + ae_int_t algokind, + /* Integer */ ae_vector* trnsubset, + ae_int_t trnsubsetsize, + /* Integer */ ae_vector* valsubset, + ae_int_t valsubsetsize, + multilayerperceptron* network, + mlpreport* rep, + ae_bool isrootcall, + ae_shared_pool* sessions, + ae_state *_state); +ae_bool _trypexec_mlptrain_mlptrainnetworkx(mlptrainer* s, + ae_int_t nrestarts, + ae_int_t algokind, + /* Integer */ ae_vector* trnsubset, + ae_int_t trnsubsetsize, + /* Integer */ ae_vector* valsubset, + ae_int_t valsubsetsize, + multilayerperceptron* network, + mlpreport* rep, + ae_bool isrootcall, + ae_shared_pool* sessions, ae_state *_state); +static void mlptrain_mlptrainensemblex(mlptrainer* s, + mlpensemble* ensemble, + ae_int_t idx0, + ae_int_t idx1, + ae_int_t nrestarts, + ae_int_t trainingmethod, + sinteger* ngrad, + ae_bool isrootcall, + ae_shared_pool* esessions, + ae_state *_state); +ae_bool _trypexec_mlptrain_mlptrainensemblex(mlptrainer* s, + mlpensemble* ensemble, + ae_int_t idx0, + ae_int_t idx1, + ae_int_t nrestarts, + ae_int_t trainingmethod, + sinteger* ngrad, + ae_bool isrootcall, + ae_shared_pool* esessions, ae_state *_state); +static void mlptrain_mlpstarttrainingx(mlptrainer* s, + ae_bool randomstart, + ae_int_t algokind, + /* Integer */ ae_vector* subset, + ae_int_t subsetsize, + smlptrnsession* session, + ae_state *_state); +static ae_bool mlptrain_mlpcontinuetrainingx(mlptrainer* s, + /* Integer */ ae_vector* subset, + ae_int_t subsetsize, + ae_int_t* ngradbatch, + smlptrnsession* session, + ae_state *_state); +static void mlptrain_mlpebagginginternal(mlpensemble* ensemble, + /* Real */ ae_matrix* xy, + ae_int_t npoints, + double decay, + ae_int_t restarts, + double wstep, + ae_int_t maxits, + ae_bool lmalgorithm, ae_int_t* info, - decisionforest* df, - dfreport* rep, - ae_state *_state) -{ - ae_int_t samplesize; - - *info = 0; - _decisionforest_clear(df); - _dfreport_clear(rep); - - if( ae_fp_less_eq(r,(double)(0))||ae_fp_greater(r,(double)(1)) ) - { - *info = -1; - return; - } - if( 
nrndvars<=0||nrndvars>nvars ) - { - *info = -1; - return; - } - samplesize = ae_maxint(ae_round(r*npoints, _state), 1, _state); - dfbuildinternal(xy, npoints, nvars, nclasses, ntrees, samplesize, nrndvars, dforest_dfusestrongsplits+dforest_dfuseevs, info, df, rep, _state); -} + mlpreport* rep, + mlpcvreport* ooberrors, + ae_state *_state); +static void mlptrain_initmlptrnsession(multilayerperceptron* networktrained, + ae_bool randomizenetwork, + mlptrainer* trainer, + smlptrnsession* session, + ae_state *_state); +static void mlptrain_initmlptrnsessions(multilayerperceptron* networktrained, + ae_bool randomizenetwork, + mlptrainer* trainer, + ae_shared_pool* sessions, + ae_state *_state); +static void mlptrain_initmlpetrnsession(multilayerperceptron* individualnetwork, + mlptrainer* trainer, + mlpetrnsession* session, + ae_state *_state); +static void mlptrain_initmlpetrnsessions(multilayerperceptron* individualnetwork, + mlptrainer* trainer, + ae_shared_pool* sessions, + ae_state *_state); -void dfbuildinternal(/* Real */ ae_matrix* xy, +#endif +#if defined(AE_COMPILE_CLUSTERING) || !defined(AE_PARTIAL_BUILD) +static ae_int_t clustering_kmeansblocksize = 32; +static ae_int_t clustering_kmeansparalleldim = 8; +static ae_int_t clustering_kmeansparallelk = 4; +static double clustering_complexitymultiplier = 1.0; +static void clustering_selectinitialcenters(/* Real */ ae_matrix* xy, ae_int_t npoints, ae_int_t nvars, - ae_int_t nclasses, - ae_int_t ntrees, - ae_int_t samplesize, + ae_int_t initalgo, + hqrndstate* rs, + ae_int_t k, + /* Real */ ae_matrix* ct, + apbuffers* initbuf, + ae_shared_pool* updatepool, + ae_state *_state); +static ae_bool clustering_fixcenters(/* Real */ ae_matrix* xy, + ae_int_t npoints, + ae_int_t nvars, + /* Real */ ae_matrix* ct, + ae_int_t k, + apbuffers* initbuf, + ae_shared_pool* updatepool, + ae_state *_state); +static void clustering_clusterizerrunahcinternal(clusterizerstate* s, + /* Real */ ae_matrix* d, + ahcreport* rep, + ae_state *_state); +static void clustering_evaluatedistancematrixrec(/* Real */ ae_matrix* xy, ae_int_t nfeatures, - ae_int_t flags, - ae_int_t* info, - decisionforest* df, - dfreport* rep, - ae_state *_state) -{ - ae_frame _frame_block; - ae_int_t i; - ae_int_t j; - ae_int_t k; - ae_int_t tmpi; - ae_int_t lasttreeoffs; - ae_int_t offs; - ae_int_t ooboffs; - ae_int_t treesize; - ae_int_t nvarsinpool; - ae_bool useevs; - dfinternalbuffers bufs; - ae_vector permbuf; - ae_vector oobbuf; - ae_vector oobcntbuf; - ae_matrix xys; - ae_vector x; - ae_vector y; - ae_int_t oobcnt; - ae_int_t oobrelcnt; - double v; - double vmin; - double vmax; - ae_bool bflag; - hqrndstate rs; + ae_int_t disttype, + /* Real */ ae_matrix* d, + ae_int_t i0, + ae_int_t i1, + ae_int_t j0, + ae_int_t j1, + ae_state *_state); +ae_bool _trypexec_clustering_evaluatedistancematrixrec(/* Real */ ae_matrix* xy, + ae_int_t nfeatures, + ae_int_t disttype, + /* Real */ ae_matrix* d, + ae_int_t i0, + ae_int_t i1, + ae_int_t j0, + ae_int_t j1, ae_state *_state); - ae_frame_make(_state, &_frame_block); - *info = 0; - _decisionforest_clear(df); - _dfreport_clear(rep); - _dfinternalbuffers_init(&bufs, _state); - ae_vector_init(&permbuf, 0, DT_INT, _state); - ae_vector_init(&oobbuf, 0, DT_REAL, _state); - ae_vector_init(&oobcntbuf, 0, DT_INT, _state); - ae_matrix_init(&xys, 0, 0, DT_REAL, _state); - ae_vector_init(&x, 0, DT_REAL, _state); - ae_vector_init(&y, 0, DT_REAL, _state); - _hqrndstate_init(&rs, _state); + +#endif +#if defined(AE_COMPILE_DFOREST) || !defined(AE_PARTIAL_BUILD) 
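/*
 * Informal note on the constants declared below: judging by the node layout
 * documented in the old dfbuildinternal implementation removed elsewhere in
 * this diff, an uncompressed inner node occupies three slots (split variable
 * index, threshold, ">=" branch offset) and a leaf node two slots (the -1
 * leaf marker plus the stored class/value), which is consistent with
 * dforest_innernodewidth=3 and dforest_leafnodewidth=2.
 */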
+static ae_int_t dforest_innernodewidth = 3; +static ae_int_t dforest_leafnodewidth = 2; +static ae_int_t dforest_dfusestrongsplits = 1; +static ae_int_t dforest_dfuseevs = 2; +static ae_int_t dforest_dfuncompressedv0 = 0; +static ae_int_t dforest_dfcompressedv0 = 1; +static ae_int_t dforest_needtrngini = 1; +static ae_int_t dforest_needoobgini = 2; +static ae_int_t dforest_needpermutation = 3; +static ae_int_t dforest_permutationimportancebatchsize = 512; +static void dforest_buildrandomtree(decisionforestbuilder* s, + ae_int_t treeidx0, + ae_int_t treeidx1, + ae_state *_state); +ae_bool _trypexec_dforest_buildrandomtree(decisionforestbuilder* s, + ae_int_t treeidx0, + ae_int_t treeidx1, ae_state *_state); +static void dforest_buildrandomtreerec(decisionforestbuilder* s, + dfworkbuf* workbuf, + ae_int_t workingset, + ae_int_t varstoselect, + /* Real */ ae_vector* treebuf, + dfvotebuf* votebuf, + hqrndstate* rs, + ae_int_t idx0, + ae_int_t idx1, + ae_int_t oobidx0, + ae_int_t oobidx1, + double meanloss, + double topmostmeanloss, + ae_int_t* treesize, + ae_state *_state); +static void dforest_estimatevariableimportance(decisionforestbuilder* s, + ae_int_t sessionseed, + decisionforest* df, + ae_int_t ntrees, + dfreport* rep, + ae_state *_state); +ae_bool _trypexec_dforest_estimatevariableimportance(decisionforestbuilder* s, + ae_int_t sessionseed, + decisionforest* df, + ae_int_t ntrees, + dfreport* rep, ae_state *_state); +static void dforest_estimatepermutationimportances(decisionforestbuilder* s, + decisionforest* df, + ae_int_t ntrees, + ae_shared_pool* permpool, + ae_int_t idx0, + ae_int_t idx1, + ae_state *_state); +ae_bool _trypexec_dforest_estimatepermutationimportances(decisionforestbuilder* s, + decisionforest* df, + ae_int_t ntrees, + ae_shared_pool* permpool, + ae_int_t idx0, + ae_int_t idx1, ae_state *_state); +static void dforest_cleanreport(decisionforestbuilder* s, + dfreport* rep, + ae_state *_state); +static double dforest_meannrms2(ae_int_t nclasses, + /* Integer */ ae_vector* trnlabelsi, + /* Real */ ae_vector* trnlabelsr, + ae_int_t trnidx0, + ae_int_t trnidx1, + /* Integer */ ae_vector* tstlabelsi, + /* Real */ ae_vector* tstlabelsr, + ae_int_t tstidx0, + ae_int_t tstidx1, + /* Integer */ ae_vector* tmpi, + ae_state *_state); +static void dforest_choosecurrentsplitdense(decisionforestbuilder* s, + dfworkbuf* workbuf, + ae_int_t* varsinpool, + ae_int_t varstoselect, + hqrndstate* rs, + ae_int_t idx0, + ae_int_t idx1, + ae_int_t* varbest, + double* splitbest, + ae_state *_state); +static void dforest_evaluatedensesplit(decisionforestbuilder* s, + dfworkbuf* workbuf, + hqrndstate* rs, + ae_int_t splitvar, + ae_int_t idx0, + ae_int_t idx1, + ae_int_t* info, + double* split, + double* rms, + ae_state *_state); +static void dforest_classifiersplit(decisionforestbuilder* s, + dfworkbuf* workbuf, + /* Real */ ae_vector* x, + /* Integer */ ae_vector* c, + ae_int_t n, + hqrndstate* rs, + ae_int_t* info, + double* threshold, + double* e, + /* Real */ ae_vector* sortrbuf, + /* Integer */ ae_vector* sortibuf, + ae_state *_state); +static void dforest_regressionsplit(decisionforestbuilder* s, + dfworkbuf* workbuf, + /* Real */ ae_vector* x, + /* Real */ ae_vector* y, + ae_int_t n, + ae_int_t* info, + double* threshold, + double* e, + /* Real */ ae_vector* sortrbuf, + /* Real */ ae_vector* sortrbuf2, + ae_state *_state); +static double dforest_getsplit(decisionforestbuilder* s, + double a, + double b, + hqrndstate* rs, + ae_state *_state); +static void 
dforest_outputleaf(decisionforestbuilder* s, + dfworkbuf* workbuf, + /* Real */ ae_vector* treebuf, + dfvotebuf* votebuf, + ae_int_t idx0, + ae_int_t idx1, + ae_int_t oobidx0, + ae_int_t oobidx1, + ae_int_t* treesize, + double leafval, + ae_state *_state); +static void dforest_analyzeandpreprocessdataset(decisionforestbuilder* s, + ae_state *_state); +static void dforest_mergetrees(decisionforestbuilder* s, + decisionforest* df, + ae_state *_state); +static void dforest_processvotingresults(decisionforestbuilder* s, + ae_int_t ntrees, + dfvotebuf* buf, + dfreport* rep, + ae_state *_state); +static double dforest_binarycompression(decisionforest* df, + ae_bool usemantissa8, + ae_state *_state); +static ae_int_t dforest_computecompressedsizerec(decisionforest* df, + ae_bool usemantissa8, + ae_int_t treeroot, + ae_int_t treepos, + /* Integer */ ae_vector* compressedsizes, + ae_bool savecompressedsizes, + ae_state *_state); +static void dforest_compressrec(decisionforest* df, + ae_bool usemantissa8, + ae_int_t treeroot, + ae_int_t treepos, + /* Integer */ ae_vector* compressedsizes, + ae_vector* buf, + ae_int_t* dstoffs, + ae_state *_state); +static ae_int_t dforest_computecompresseduintsize(ae_int_t v, + ae_state *_state); +static void dforest_streamuint(ae_vector* buf, + ae_int_t* offs, + ae_int_t v, + ae_state *_state); +static ae_int_t dforest_unstreamuint(ae_vector* buf, + ae_int_t* offs, + ae_state *_state); +static void dforest_streamfloat(ae_vector* buf, + ae_bool usemantissa8, + ae_int_t* offs, + double v, + ae_state *_state); +static double dforest_unstreamfloat(ae_vector* buf, + ae_bool usemantissa8, + ae_int_t* offs, + ae_state *_state); +static ae_int_t dforest_dfclserror(decisionforest* df, + /* Real */ ae_matrix* xy, + ae_int_t npoints, + ae_state *_state); +static void dforest_dfprocessinternaluncompressed(decisionforest* df, + ae_int_t subtreeroot, + ae_int_t nodeoffs, + /* Real */ ae_vector* x, + /* Real */ ae_vector* y, + ae_state *_state); +static void dforest_dfprocessinternalcompressed(decisionforest* df, + ae_int_t offs, + /* Real */ ae_vector* x, + /* Real */ ae_vector* y, + ae_state *_state); +static double dforest_xfastpow(double r, ae_int_t n, ae_state *_state); + + +#endif +#if defined(AE_COMPILE_KNN) || !defined(AE_PARTIAL_BUILD) +static ae_int_t knn_knnfirstversion = 0; +static void knn_clearreport(knnreport* rep, ae_state *_state); +static void knn_processinternal(knnmodel* model, + knnbuffer* buf, + ae_state *_state); + + +#endif +#if defined(AE_COMPILE_DATACOMP) || !defined(AE_PARTIAL_BUILD) + + +#endif + +#if defined(AE_COMPILE_PCA) || !defined(AE_PARTIAL_BUILD) + + +/************************************************************************* +Principal components analysis + +This function builds orthogonal basis where first axis corresponds to +direction with maximum variance, second axis maximizes variance in the +subspace orthogonal to first axis and so on. + +This function builds FULL basis, i.e. returns N vectors corresponding to +ALL directions, no matter how informative. If you need just a few (say, +10 or 50) of the most important directions, you may find it faster to use +one of the reduced versions: +* pcatruncatedsubspace() - for subspace iteration based method + +It should be noted that, unlike LDA, PCA does not use class labels. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! 
* multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. + +INPUT PARAMETERS: + X - dataset, array[0..NPoints-1,0..NVars-1]. + matrix contains ONLY INDEPENDENT VARIABLES. + NPoints - dataset size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + +OUTPUT PARAMETERS: + Info - return code: + * -4, if SVD subroutine haven't converged + * -1, if wrong parameters has been passed (NPoints<0, + NVars<1) + * 1, if task is solved + S2 - array[0..NVars-1]. variance values corresponding + to basis vectors. + V - array[0..NVars-1,0..NVars-1] + matrix, whose columns store basis vectors. + + -- ALGLIB -- + Copyright 25.08.2008 by Bochkanov Sergey +*************************************************************************/ +void pcabuildbasis(/* Real */ ae_matrix* x, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t* info, + /* Real */ ae_vector* s2, + /* Real */ ae_matrix* v, + ae_state *_state) +{ + ae_frame _frame_block; + ae_matrix a; + ae_matrix u; + ae_matrix vt; + ae_vector m; + ae_vector t; + ae_int_t i; + ae_int_t j; + double mean; + double variance; + double skewness; + double kurtosis; + + ae_frame_make(_state, &_frame_block); + memset(&a, 0, sizeof(a)); + memset(&u, 0, sizeof(u)); + memset(&vt, 0, sizeof(vt)); + memset(&m, 0, sizeof(m)); + memset(&t, 0, sizeof(t)); + *info = 0; + ae_vector_clear(s2); + ae_matrix_clear(v); + ae_matrix_init(&a, 0, 0, DT_REAL, _state, ae_true); + ae_matrix_init(&u, 0, 0, DT_REAL, _state, ae_true); + ae_matrix_init(&vt, 0, 0, DT_REAL, _state, ae_true); + ae_vector_init(&m, 0, DT_REAL, _state, ae_true); + ae_vector_init(&t, 0, DT_REAL, _state, ae_true); /* - * Test for inputs - */ - if( (((((npoints<1||samplesize<1)||samplesize>npoints)||nvars<1)||nclasses<1)||ntrees<1)||nfeatures<1 ) - { - *info = -1; - ae_frame_leave(_state); - return; - } - if( nclasses>1 ) - { - for(i=0; i<=npoints-1; i++) - { - if( ae_round(xy->ptr.pp_double[i][nvars], _state)<0||ae_round(xy->ptr.pp_double[i][nvars], _state)>=nclasses ) - { - *info = -2; - ae_frame_leave(_state); - return; - } - } - } - *info = 1; - - /* - * Flags - */ - useevs = flags/dforest_dfuseevs%2!=0; - - /* - * Allocate data, prepare header + * Check input data */ - treesize = 1+dforest_innernodewidth*(samplesize-1)+dforest_leafnodewidth*samplesize; - ae_vector_set_length(&permbuf, npoints-1+1, _state); - ae_vector_set_length(&bufs.treebuf, treesize-1+1, _state); - ae_vector_set_length(&bufs.idxbuf, npoints-1+1, _state); - ae_vector_set_length(&bufs.tmpbufr, npoints-1+1, _state); - ae_vector_set_length(&bufs.tmpbufr2, npoints-1+1, _state); - ae_vector_set_length(&bufs.tmpbufi, npoints-1+1, _state); - ae_vector_set_length(&bufs.sortrbuf, npoints, _state); - ae_vector_set_length(&bufs.sortrbuf2, npoints, _state); - ae_vector_set_length(&bufs.sortibuf, npoints, _state); - ae_vector_set_length(&bufs.varpool, nvars-1+1, _state); - ae_vector_set_length(&bufs.evsbin, nvars-1+1, _state); - ae_vector_set_length(&bufs.evssplits, nvars-1+1, _state); - ae_vector_set_length(&bufs.classibuf, 2*nclasses-1+1, _state); - ae_vector_set_length(&oobbuf, nclasses*npoints-1+1, _state); - ae_vector_set_length(&oobcntbuf, npoints-1+1, _state); - ae_vector_set_length(&df->trees, ntrees*treesize-1+1, _state); - 
ae_matrix_set_length(&xys, samplesize-1+1, nvars+1, _state); - ae_vector_set_length(&x, nvars-1+1, _state); - ae_vector_set_length(&y, nclasses-1+1, _state); - for(i=0; i<=npoints-1; i++) - { - permbuf.ptr.p_int[i] = i; - } - for(i=0; i<=npoints*nclasses-1; i++) - { - oobbuf.ptr.p_double[i] = (double)(0); - } - for(i=0; i<=npoints-1; i++) + if( npoints<0||nvars<1 ) { - oobcntbuf.ptr.p_int[i] = 0; + *info = -1; + ae_frame_leave(_state); + return; } + *info = 1; /* - * Prepare variable pool and EVS (extended variable selection/splitting) buffers - * (whether EVS is turned on or not): - * 1. detect binary variables and pre-calculate splits for them - * 2. detect variables with non-distinct values and exclude them from pool + * Special case: NPoints=0 */ - for(i=0; i<=nvars-1; i++) - { - bufs.varpool.ptr.p_int[i] = i; - } - nvarsinpool = nvars; - if( useevs ) + if( npoints==0 ) { - for(j=0; j<=nvars-1; j++) + ae_vector_set_length(s2, nvars, _state); + ae_matrix_set_length(v, nvars, nvars, _state); + for(i=0; i<=nvars-1; i++) { - vmin = xy->ptr.pp_double[0][j]; - vmax = vmin; - for(i=0; i<=npoints-1; i++) - { - v = xy->ptr.pp_double[i][j]; - vmin = ae_minreal(vmin, v, _state); - vmax = ae_maxreal(vmax, v, _state); - } - if( ae_fp_eq(vmin,vmax) ) - { - - /* - * exclude variable from pool - */ - bufs.varpool.ptr.p_int[j] = bufs.varpool.ptr.p_int[nvarsinpool-1]; - bufs.varpool.ptr.p_int[nvarsinpool-1] = -1; - nvarsinpool = nvarsinpool-1; - continue; - } - bflag = ae_false; - for(i=0; i<=npoints-1; i++) + s2->ptr.p_double[i] = (double)(0); + } + for(i=0; i<=nvars-1; i++) + { + for(j=0; j<=nvars-1; j++) { - v = xy->ptr.pp_double[i][j]; - if( ae_fp_neq(v,vmin)&&ae_fp_neq(v,vmax) ) + if( i==j ) { - bflag = ae_true; - break; + v->ptr.pp_double[i][j] = (double)(1); } - } - if( bflag ) - { - - /* - * non-binary variable - */ - bufs.evsbin.ptr.p_bool[j] = ae_false; - } - else - { - - /* - * Prepare - */ - bufs.evsbin.ptr.p_bool[j] = ae_true; - bufs.evssplits.ptr.p_double[j] = 0.5*(vmin+vmax); - if( ae_fp_less_eq(bufs.evssplits.ptr.p_double[j],vmin) ) + else { - bufs.evssplits.ptr.p_double[j] = vmax; + v->ptr.pp_double[i][j] = (double)(0); } } } + ae_frame_leave(_state); + return; } /* - * RANDOM FOREST FORMAT - * W[0] - size of array - * W[1] - version number - * W[2] - NVars - * W[3] - NClasses (1 for regression) - * W[4] - NTrees - * W[5] - trees offset - * - * - * TREE FORMAT - * W[Offs] - size of sub-array - * node info: - * W[K+0] - variable number (-1 for leaf mode) - * W[K+1] - threshold (class/value for leaf node) - * W[K+2] - ">=" branch index (absent for leaf node) - * - */ - df->nvars = nvars; - df->nclasses = nclasses; - df->ntrees = ntrees; - - /* - * Build forest + * Calculate means */ - hqrndrandomize(&rs, _state); - offs = 0; - for(i=0; i<=ntrees-1; i++) + ae_vector_set_length(&m, nvars, _state); + ae_vector_set_length(&t, npoints, _state); + for(j=0; j<=nvars-1; j++) { - - /* - * Prepare sample - */ - for(k=0; k<=samplesize-1; k++) - { - j = k+hqrnduniformi(&rs, npoints-k, _state); - tmpi = permbuf.ptr.p_int[k]; - permbuf.ptr.p_int[k] = permbuf.ptr.p_int[j]; - permbuf.ptr.p_int[j] = tmpi; - j = permbuf.ptr.p_int[k]; - ae_v_move(&xys.ptr.pp_double[k][0], 1, &xy->ptr.pp_double[j][0], 1, ae_v_len(0,nvars)); - } - - /* - * build tree, copy - */ - dforest_dfbuildtree(&xys, samplesize, nvars, nclasses, nfeatures, nvarsinpool, flags, &bufs, &rs, _state); - j = ae_round(bufs.treebuf.ptr.p_double[0], _state); - ae_v_move(&df->trees.ptr.p_double[offs], 1, &bufs.treebuf.ptr.p_double[0], 1, 
ae_v_len(offs,offs+j-1)); - lasttreeoffs = offs; - offs = offs+j; - - /* - * OOB estimates - */ - for(k=samplesize; k<=npoints-1; k++) - { - for(j=0; j<=nclasses-1; j++) - { - y.ptr.p_double[j] = (double)(0); - } - j = permbuf.ptr.p_int[k]; - ae_v_move(&x.ptr.p_double[0], 1, &xy->ptr.pp_double[j][0], 1, ae_v_len(0,nvars-1)); - dforest_dfprocessinternal(df, lasttreeoffs, &x, &y, _state); - ae_v_add(&oobbuf.ptr.p_double[j*nclasses], 1, &y.ptr.p_double[0], 1, ae_v_len(j*nclasses,(j+1)*nclasses-1)); - oobcntbuf.ptr.p_int[j] = oobcntbuf.ptr.p_int[j]+1; - } + ae_v_move(&t.ptr.p_double[0], 1, &x->ptr.pp_double[0][j], x->stride, ae_v_len(0,npoints-1)); + samplemoments(&t, npoints, &mean, &variance, &skewness, &kurtosis, _state); + m.ptr.p_double[j] = mean; } - df->bufsize = offs; /* - * Normalize OOB results + * Center, apply SVD, prepare output */ + ae_matrix_set_length(&a, ae_maxint(npoints, nvars, _state), nvars, _state); for(i=0; i<=npoints-1; i++) { - if( oobcntbuf.ptr.p_int[i]!=0 ) - { - v = (double)1/(double)oobcntbuf.ptr.p_int[i]; - ae_v_muld(&oobbuf.ptr.p_double[i*nclasses], 1, ae_v_len(i*nclasses,i*nclasses+nclasses-1), v); - } + ae_v_move(&a.ptr.pp_double[i][0], 1, &x->ptr.pp_double[i][0], 1, ae_v_len(0,nvars-1)); + ae_v_sub(&a.ptr.pp_double[i][0], 1, &m.ptr.p_double[0], 1, ae_v_len(0,nvars-1)); } - - /* - * Calculate training set estimates - */ - rep->relclserror = dfrelclserror(df, xy, npoints, _state); - rep->avgce = dfavgce(df, xy, npoints, _state); - rep->rmserror = dfrmserror(df, xy, npoints, _state); - rep->avgerror = dfavgerror(df, xy, npoints, _state); - rep->avgrelerror = dfavgrelerror(df, xy, npoints, _state); - - /* - * Calculate OOB estimates. - */ - rep->oobrelclserror = (double)(0); - rep->oobavgce = (double)(0); - rep->oobrmserror = (double)(0); - rep->oobavgerror = (double)(0); - rep->oobavgrelerror = (double)(0); - oobcnt = 0; - oobrelcnt = 0; - for(i=0; i<=npoints-1; i++) + for(i=npoints; i<=nvars-1; i++) { - if( oobcntbuf.ptr.p_int[i]!=0 ) + for(j=0; j<=nvars-1; j++) { - ooboffs = i*nclasses; - if( nclasses>1 ) - { - - /* - * classification-specific code - */ - k = ae_round(xy->ptr.pp_double[i][nvars], _state); - tmpi = 0; - for(j=1; j<=nclasses-1; j++) - { - if( ae_fp_greater(oobbuf.ptr.p_double[ooboffs+j],oobbuf.ptr.p_double[ooboffs+tmpi]) ) - { - tmpi = j; - } - } - if( tmpi!=k ) - { - rep->oobrelclserror = rep->oobrelclserror+1; - } - if( ae_fp_neq(oobbuf.ptr.p_double[ooboffs+k],(double)(0)) ) - { - rep->oobavgce = rep->oobavgce-ae_log(oobbuf.ptr.p_double[ooboffs+k], _state); - } - else - { - rep->oobavgce = rep->oobavgce-ae_log(ae_minrealnumber, _state); - } - for(j=0; j<=nclasses-1; j++) - { - if( j==k ) - { - rep->oobrmserror = rep->oobrmserror+ae_sqr(oobbuf.ptr.p_double[ooboffs+j]-1, _state); - rep->oobavgerror = rep->oobavgerror+ae_fabs(oobbuf.ptr.p_double[ooboffs+j]-1, _state); - rep->oobavgrelerror = rep->oobavgrelerror+ae_fabs(oobbuf.ptr.p_double[ooboffs+j]-1, _state); - oobrelcnt = oobrelcnt+1; - } - else - { - rep->oobrmserror = rep->oobrmserror+ae_sqr(oobbuf.ptr.p_double[ooboffs+j], _state); - rep->oobavgerror = rep->oobavgerror+ae_fabs(oobbuf.ptr.p_double[ooboffs+j], _state); - } - } - } - else - { - - /* - * regression-specific code - */ - rep->oobrmserror = rep->oobrmserror+ae_sqr(oobbuf.ptr.p_double[ooboffs]-xy->ptr.pp_double[i][nvars], _state); - rep->oobavgerror = rep->oobavgerror+ae_fabs(oobbuf.ptr.p_double[ooboffs]-xy->ptr.pp_double[i][nvars], _state); - if( ae_fp_neq(xy->ptr.pp_double[i][nvars],(double)(0)) ) - { - rep->oobavgrelerror = 
rep->oobavgrelerror+ae_fabs((oobbuf.ptr.p_double[ooboffs]-xy->ptr.pp_double[i][nvars])/xy->ptr.pp_double[i][nvars], _state); - oobrelcnt = oobrelcnt+1; - } - } - - /* - * update OOB estimates count. - */ - oobcnt = oobcnt+1; + a.ptr.pp_double[i][j] = (double)(0); } } - if( oobcnt>0 ) + if( !rmatrixsvd(&a, ae_maxint(npoints, nvars, _state), nvars, 0, 1, 2, s2, &u, &vt, _state) ) + { + *info = -4; + ae_frame_leave(_state); + return; + } + if( npoints!=1 ) { - rep->oobrelclserror = rep->oobrelclserror/oobcnt; - rep->oobavgce = rep->oobavgce/oobcnt; - rep->oobrmserror = ae_sqrt(rep->oobrmserror/(oobcnt*nclasses), _state); - rep->oobavgerror = rep->oobavgerror/(oobcnt*nclasses); - if( oobrelcnt>0 ) + for(i=0; i<=nvars-1; i++) { - rep->oobavgrelerror = rep->oobavgrelerror/oobrelcnt; + s2->ptr.p_double[i] = ae_sqr(s2->ptr.p_double[i], _state)/(npoints-1); } } + ae_matrix_set_length(v, nvars, nvars, _state); + copyandtranspose(&vt, 0, nvars-1, 0, nvars-1, v, 0, nvars-1, 0, nvars-1, _state); ae_frame_leave(_state); } /************************************************************************* -Procesing +Principal components analysis + +This function performs truncated PCA, i.e. returns just a few most important +directions. + +Internally it uses iterative eigensolver which is very efficient when only +a minor fraction of full basis is required. Thus, if you need full basis, +it is better to use pcabuildbasis() function. + +It should be noted that, unlike LDA, PCA does not use class labels. + + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - DF - decision forest model - X - input vector, array[0..NVars-1]. + X - dataset, array[0..NPoints-1,0..NVars-1]. + matrix contains ONLY INDEPENDENT VARIABLES. 
+ NPoints - dataset size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + NNeeded - number of requested components, in [1,NVars] range; + this function is efficient only for NNeeded<=0, "PCATruncatedSubspace: npoints<0", _state); + ae_assert(nvars>=1, "PCATruncatedSubspace: nvars<1", _state); + ae_assert(nneeded>0, "PCATruncatedSubspace: nneeded<1", _state); + ae_assert(nneeded<=nvars, "PCATruncatedSubspace: nneeded>nvars", _state); + ae_assert(maxits>=0, "PCATruncatedSubspace: maxits<0", _state); + ae_assert(ae_isfinite(eps, _state)&&ae_fp_greater_eq(eps,(double)(0)), "PCATruncatedSubspace: eps<0 or is not finite", _state); + ae_assert(x->rows>=npoints, "PCATruncatedSubspace: rows(x)cols>=nvars||npoints==0, "PCATruncatedSubspace: cols(x)cntnclasses ) + if( npoints==0 ) { - ae_vector_set_length(y, df->nclasses, _state); + ae_vector_set_length(s2, nneeded, _state); + ae_matrix_set_length(v, nvars, nneeded, _state); + for(i=0; i<=nvars-1; i++) + { + s2->ptr.p_double[i] = (double)(0); + } + for(i=0; i<=nvars-1; i++) + { + for(j=0; j<=nneeded-1; j++) + { + if( i==j ) + { + v->ptr.pp_double[i][j] = (double)(1); + } + else + { + v->ptr.pp_double[i][j] = (double)(0); + } + } + } + ae_frame_leave(_state); + return; } - offs = 0; - for(i=0; i<=df->nclasses-1; i++) + + /* + * Center matrix + */ + ae_vector_set_length(&means, nvars, _state); + for(i=0; i<=nvars-1; i++) { - y->ptr.p_double[i] = (double)(0); + means.ptr.p_double[i] = (double)(0); } - for(i=0; i<=df->ntrees-1; i++) + vv = (double)1/(double)npoints; + for(i=0; i<=npoints-1; i++) { - - /* - * Process basic tree - */ - dforest_dfprocessinternal(df, offs, x, y, _state); - - /* - * Next tree - */ - offs = offs+ae_round(df->trees.ptr.p_double[offs], _state); + ae_v_addd(&means.ptr.p_double[0], 1, &x->ptr.pp_double[i][0], 1, ae_v_len(0,nvars-1), vv); } - v = (double)1/(double)df->ntrees; - ae_v_muld(&y->ptr.p_double[0], 1, ae_v_len(0,df->nclasses-1), v); + ae_matrix_set_length(&a, npoints, nvars, _state); + for(i=0; i<=npoints-1; i++) + { + ae_v_move(&a.ptr.pp_double[i][0], 1, &x->ptr.pp_double[i][0], 1, ae_v_len(0,nvars-1)); + ae_v_sub(&a.ptr.pp_double[i][0], 1, &means.ptr.p_double[0], 1, ae_v_len(0,nvars-1)); + } + + /* + * Find eigenvalues with subspace iteration solver + */ + eigsubspacecreate(nvars, nneeded, &solver, _state); + eigsubspacesetcond(&solver, eps, maxits, _state); + eigsubspaceoocstart(&solver, 0, _state); + while(eigsubspaceooccontinue(&solver, _state)) + { + ae_assert(solver.requesttype==0, "PCATruncatedSubspace: integrity check failed", _state); + k = solver.requestsize; + rmatrixsetlengthatleast(&b, npoints, k, _state); + rmatrixgemm(npoints, k, nvars, 1.0, &a, 0, 0, 0, &solver.x, 0, 0, 0, 0.0, &b, 0, 0, _state); + rmatrixgemm(nvars, k, npoints, 1.0, &a, 0, 0, 1, &b, 0, 0, 0, 0.0, &solver.ax, 0, 0, _state); + } + eigsubspaceoocstop(&solver, s2, v, &rep, _state); + if( npoints!=1 ) + { + for(i=0; i<=nneeded-1; i++) + { + s2->ptr.p_double[i] = s2->ptr.p_double[i]/(npoints-1); + } + } + ae_frame_leave(_state); } /************************************************************************* -'interactive' variant of DFProcess for languages like Python which support -constructs like "Y = DFProcessI(DF,X)" and interactive mode of interpreter - -This function allocates new array on each call, so it is significantly -slower than its 'non-interactive' counterpart, but it is more convenient -when you call it from command line. 
- - -- ALGLIB -- - Copyright 28.02.2010 by Bochkanov Sergey -*************************************************************************/ -void dfprocessi(decisionforest* df, - /* Real */ ae_vector* x, - /* Real */ ae_vector* y, - ae_state *_state) -{ +Sparse truncated principal components analysis - ae_vector_clear(y); +This function performs sparse truncated PCA, i.e. returns just a few most +important principal components for a sparse input X. - dfprocess(df, x, y, _state); -} +Internally it uses iterative eigensolver which is very efficient when only +a minor fraction of full basis is required. +It should be noted that, unlike LDA, PCA does not use class labels. -/************************************************************************* -Relative classification error on the test set + ! COMMERCIAL EDITION OF ALGLIB: + ! + ! Commercial Edition of ALGLIB includes following important improvements + ! of this function: + ! * high-performance native backend with same C# interface (C# version) + ! * multithreading support (C++ and C# versions) + ! * hardware vendor (Intel) implementations of linear algebra primitives + ! (C++ and C# versions, x86/x64 platform) + ! + ! We recommend you to read 'Working with commercial version' section of + ! ALGLIB Reference Manual in order to find out how to use performance- + ! related features provided by commercial edition of ALGLIB. INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size - -RESULT: - percent of incorrectly classified cases. - Zero if model solves regression task. - - -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey -*************************************************************************/ -double dfrelclserror(decisionforest* df, - /* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_state *_state) -{ - double result; - - - result = (double)dforest_dfclserror(df, xy, npoints, _state)/(double)npoints; - return result; -} - - -/************************************************************************* -Average cross-entropy (in bits per element) on the test set + X - sparse dataset, sparse npoints*nvars matrix. It is + recommended to use CRS sparse storage format; non-CRS + input will be internally converted to CRS. + Matrix contains ONLY INDEPENDENT VARIABLES, and must + be EXACTLY npoints*nvars. 
+ NPoints - dataset size, NPoints>=0 + NVars - number of independent variables, NVars>=1 + NNeeded - number of requested components, in [1,NVars] range; + this function is efficient only for NNeeded<nvars-1+1, _state); - ae_vector_set_length(&y, df->nclasses-1+1, _state); - result = (double)(0); - for(i=0; i<=npoints-1; i++) + memset(&xcrs, 0, sizeof(xcrs)); + memset(&b1, 0, sizeof(b1)); + memset(&c1, 0, sizeof(c1)); + memset(&z1, 0, sizeof(z1)); + memset(&means, 0, sizeof(means)); + memset(&solver, 0, sizeof(solver)); + memset(&rep, 0, sizeof(rep)); + ae_vector_clear(s2); + ae_matrix_clear(v); + _sparsematrix_init(&xcrs, _state, ae_true); + ae_vector_init(&b1, 0, DT_REAL, _state, ae_true); + ae_vector_init(&c1, 0, DT_REAL, _state, ae_true); + ae_vector_init(&z1, 0, DT_REAL, _state, ae_true); + ae_vector_init(&means, 0, DT_REAL, _state, ae_true); + _eigsubspacestate_init(&solver, _state, ae_true); + _eigsubspacereport_init(&rep, _state, ae_true); + + ae_assert(npoints>=0, "PCATruncatedSubspaceSparse: npoints<0", _state); + ae_assert(nvars>=1, "PCATruncatedSubspaceSparse: nvars<1", _state); + ae_assert(nneeded>0, "PCATruncatedSubspaceSparse: nneeded<1", _state); + ae_assert(nneeded<=nvars, "PCATruncatedSubspaceSparse: nneeded>nvars", _state); + ae_assert(maxits>=0, "PCATruncatedSubspaceSparse: maxits<0", _state); + ae_assert(ae_isfinite(eps, _state)&&ae_fp_greater_eq(eps,(double)(0)), "PCATruncatedSubspaceSparse: eps<0 or is not finite", _state); + if( npoints>0 ) { - ae_v_move(&x.ptr.p_double[0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,df->nvars-1)); - dfprocess(df, &x, &y, _state); - if( df->nclasses>1 ) + ae_assert(sparsegetnrows(x, _state)==npoints, "PCATruncatedSubspaceSparse: rows(x)!=npoints", _state); + ae_assert(sparsegetncols(x, _state)==nvars, "PCATruncatedSubspaceSparse: cols(x)!=nvars", _state); + } + + /* + * Special case: NPoints=0 + */ + if( npoints==0 ) + { + ae_vector_set_length(s2, nneeded, _state); + ae_matrix_set_length(v, nvars, nneeded, _state); + for(i=0; i<=nvars-1; i++) { - - /* - * classification-specific code - */ - k = ae_round(xy->ptr.pp_double[i][df->nvars], _state); - tmpi = 0; - for(j=1; j<=df->nclasses-1; j++) + s2->ptr.p_double[i] = (double)(0); + } + for(i=0; i<=nvars-1; i++) + { + for(j=0; j<=nneeded-1; j++) { - if( ae_fp_greater(y.ptr.p_double[j],y.ptr.p_double[tmpi]) ) + if( i==j ) { - tmpi = j; + v->ptr.pp_double[i][j] = (double)(1); + } + else + { + v->ptr.pp_double[i][j] = (double)(0); } - } - if( ae_fp_neq(y.ptr.p_double[k],(double)(0)) ) - { - result = result-ae_log(y.ptr.p_double[k], _state); - } - else - { - result = result-ae_log(ae_minrealnumber, _state); } } + ae_frame_leave(_state); + return; } - result = result/npoints; - ae_frame_leave(_state); - return result; -} - - -/************************************************************************* -RMS error on the test set - -INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size - -RESULT: - root mean square error. - Its meaning for regression task is obvious. As for - classification task, RMS error means error when estimating posterior - probabilities. 
- - -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey -*************************************************************************/ -double dfrmserror(decisionforest* df, - /* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_state *_state) -{ - ae_frame _frame_block; - ae_vector x; - ae_vector y; - ae_int_t i; - ae_int_t j; - ae_int_t k; - ae_int_t tmpi; - double result; - - ae_frame_make(_state, &_frame_block); - ae_vector_init(&x, 0, DT_REAL, _state); - ae_vector_init(&y, 0, DT_REAL, _state); - - ae_vector_set_length(&x, df->nvars-1+1, _state); - ae_vector_set_length(&y, df->nclasses-1+1, _state); - result = (double)(0); + + /* + * If input data are not in CRS format, perform conversion to CRS + */ + if( !sparseiscrs(x, _state) ) + { + sparsecopytocrs(x, &xcrs, _state); + pcatruncatedsubspacesparse(&xcrs, npoints, nvars, nneeded, eps, maxits, s2, v, _state); + ae_frame_leave(_state); + return; + } + + /* + * Initialize parameters, prepare buffers + */ + ae_vector_set_length(&b1, npoints, _state); + ae_vector_set_length(&z1, nvars, _state); + if( ae_fp_eq(eps,(double)(0))&&maxits==0 ) + { + eps = 1.0E-6; + } + if( maxits==0 ) + { + maxits = 50+2*nvars; + } + + /* + * Calculate mean values + */ + vv = (double)1/(double)npoints; for(i=0; i<=npoints-1; i++) { - ae_v_move(&x.ptr.p_double[0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,df->nvars-1)); - dfprocess(df, &x, &y, _state); - if( df->nclasses>1 ) + b1.ptr.p_double[i] = vv; + } + sparsemtv(x, &b1, &means, _state); + + /* + * Find eigenvalues with subspace iteration solver + */ + eigsubspacecreate(nvars, nneeded, &solver, _state); + eigsubspacesetcond(&solver, eps, maxits, _state); + eigsubspaceoocstart(&solver, 0, _state); + while(eigsubspaceooccontinue(&solver, _state)) + { + ae_assert(solver.requesttype==0, "PCATruncatedSubspace: integrity check failed", _state); + for(k=0; k<=solver.requestsize-1; k++) { /* - * classification-specific code + * Calculate B1=(X-meansX)*Zk */ - k = ae_round(xy->ptr.pp_double[i][df->nvars], _state); - tmpi = 0; - for(j=1; j<=df->nclasses-1; j++) + ae_v_move(&z1.ptr.p_double[0], 1, &solver.x.ptr.pp_double[0][k], solver.x.stride, ae_v_len(0,nvars-1)); + sparsemv(x, &z1, &b1, _state); + vv = ae_v_dotproduct(&solver.x.ptr.pp_double[0][k], solver.x.stride, &means.ptr.p_double[0], 1, ae_v_len(0,nvars-1)); + for(i=0; i<=npoints-1; i++) { - if( ae_fp_greater(y.ptr.p_double[j],y.ptr.p_double[tmpi]) ) - { - tmpi = j; - } + b1.ptr.p_double[i] = b1.ptr.p_double[i]-vv; } - for(j=0; j<=df->nclasses-1; j++) + + /* + * Calculate (X-meansX)^T*B1 + */ + sparsemtv(x, &b1, &c1, _state); + vv = (double)(0); + for(i=0; i<=npoints-1; i++) { - if( j==k ) - { - result = result+ae_sqr(y.ptr.p_double[j]-1, _state); - } - else - { - result = result+ae_sqr(y.ptr.p_double[j], _state); - } + vv = vv+b1.ptr.p_double[i]; + } + for(j=0; j<=nvars-1; j++) + { + solver.ax.ptr.pp_double[j][k] = c1.ptr.p_double[j]-vv*means.ptr.p_double[j]; } } - else + } + eigsubspaceoocstop(&solver, s2, v, &rep, _state); + if( npoints!=1 ) + { + for(i=0; i<=nneeded-1; i++) { - - /* - * regression-specific code - */ - result = result+ae_sqr(y.ptr.p_double[0]-xy->ptr.pp_double[i][df->nvars], _state); + s2->ptr.p_double[i] = s2->ptr.p_double[i]/(npoints-1); } } - result = ae_sqrt(result/(npoints*df->nclasses), _state); ae_frame_leave(_state); - return result; } +#endif +#if defined(AE_COMPILE_BDSS) || !defined(AE_PARTIAL_BUILD) + + /************************************************************************* -Average error on the test set +This set of routines 
(DSErrAllocate, DSErrAccumulate, DSErrFinish) +calculates different error functions (classification error, cross-entropy, +rms, avg, avg.rel errors). -INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size +1. DSErrAllocate prepares buffer. +2. DSErrAccumulate accumulates individual errors: + * Y contains predicted output (posterior probabilities for classification) + * DesiredY contains desired output (class number for classification) +3. DSErrFinish outputs results: + * Buf[0] contains relative classification error (zero for regression tasks) + * Buf[1] contains avg. cross-entropy (zero for regression tasks) + * Buf[2] contains rms error (regression, classification) + * Buf[3] contains average error (regression, classification) + * Buf[4] contains average relative error (regression, classification) + +NOTES(1): + "NClasses>0" means that we have classification task. + "NClasses<0" means regression task with -NClasses real outputs. -RESULT: - Its meaning for regression task is obvious. As for - classification task, it means average error when estimating posterior - probabilities. +NOTES(2): + rms. avg, avg.rel errors for classification tasks are interpreted as + errors in posterior probabilities with respect to probabilities given + by training/test set. -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 11.01.2009 by Bochkanov Sergey *************************************************************************/ -double dfavgerror(decisionforest* df, - /* Real */ ae_matrix* xy, - ae_int_t npoints, +void dserrallocate(ae_int_t nclasses, + /* Real */ ae_vector* buf, ae_state *_state) { - ae_frame _frame_block; - ae_vector x; - ae_vector y; - ae_int_t i; - ae_int_t j; - ae_int_t k; - double result; - ae_frame_make(_state, &_frame_block); - ae_vector_init(&x, 0, DT_REAL, _state); - ae_vector_init(&y, 0, DT_REAL, _state); + ae_vector_clear(buf); - ae_vector_set_length(&x, df->nvars-1+1, _state); - ae_vector_set_length(&y, df->nclasses-1+1, _state); - result = (double)(0); - for(i=0; i<=npoints-1; i++) - { - ae_v_move(&x.ptr.p_double[0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,df->nvars-1)); - dfprocess(df, &x, &y, _state); - if( df->nclasses>1 ) - { - - /* - * classification-specific code - */ - k = ae_round(xy->ptr.pp_double[i][df->nvars], _state); - for(j=0; j<=df->nclasses-1; j++) - { - if( j==k ) - { - result = result+ae_fabs(y.ptr.p_double[j]-1, _state); - } - else - { - result = result+ae_fabs(y.ptr.p_double[j], _state); - } - } - } - else - { - - /* - * regression-specific code - */ - result = result+ae_fabs(y.ptr.p_double[0]-xy->ptr.pp_double[i][df->nvars], _state); - } - } - result = result/(npoints*df->nclasses); - ae_frame_leave(_state); - return result; + ae_vector_set_length(buf, 7+1, _state); + buf->ptr.p_double[0] = (double)(0); + buf->ptr.p_double[1] = (double)(0); + buf->ptr.p_double[2] = (double)(0); + buf->ptr.p_double[3] = (double)(0); + buf->ptr.p_double[4] = (double)(0); + buf->ptr.p_double[5] = (double)(nclasses); + buf->ptr.p_double[6] = (double)(0); + buf->ptr.p_double[7] = (double)(0); } /************************************************************************* -Average relative error on the test set - -INPUT PARAMETERS: - DF - decision forest model - XY - test set - NPoints - test set size - -RESULT: - Its meaning for regression task is obvious. As for - classification task, it means average relative error when estimating - posterior probability of belonging to the correct class. 
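A minimal end-to-end sketch of the DSErrAllocate/DSErrAccumulate/DSErrFinish
protocol described above (names are taken from this file; Y and DesiredY are
assumed to be filled by the caller for each sample):

    dserrallocate(nclasses, &buf, _state);        // nclasses>0: classification
    for(i=0; i<=npoints-1; i++)
    {
        // y holds predicted posterior probabilities for sample i,
        // desiredy[0] holds the true class number
        dserraccumulate(&buf, &y, &desiredy, _state);
    }
    dserrfinish(&buf, _state);
    // buf[0]=relative classification error, buf[1]=avg cross-entropy,
    // buf[2]=rms, buf[3]=avg, buf[4]=avg.rel error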
+See DSErrAllocate for comments on this routine. -- ALGLIB -- - Copyright 16.02.2009 by Bochkanov Sergey + Copyright 11.01.2009 by Bochkanov Sergey *************************************************************************/ -double dfavgrelerror(decisionforest* df, - /* Real */ ae_matrix* xy, - ae_int_t npoints, +void dserraccumulate(/* Real */ ae_vector* buf, + /* Real */ ae_vector* y, + /* Real */ ae_vector* desiredy, ae_state *_state) { - ae_frame _frame_block; - ae_vector x; - ae_vector y; - ae_int_t relcnt; - ae_int_t i; + ae_int_t nclasses; + ae_int_t nout; + ae_int_t offs; + ae_int_t mmax; + ae_int_t rmax; ae_int_t j; - ae_int_t k; - double result; + double v; + double ev; - ae_frame_make(_state, &_frame_block); - ae_vector_init(&x, 0, DT_REAL, _state); - ae_vector_init(&y, 0, DT_REAL, _state); - ae_vector_set_length(&x, df->nvars-1+1, _state); - ae_vector_set_length(&y, df->nclasses-1+1, _state); - result = (double)(0); - relcnt = 0; - for(i=0; i<=npoints-1; i++) + offs = 5; + nclasses = ae_round(buf->ptr.p_double[offs], _state); + if( nclasses>0 ) { - ae_v_move(&x.ptr.p_double[0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,df->nvars-1)); - dfprocess(df, &x, &y, _state); - if( df->nclasses>1 ) + + /* + * Classification + */ + rmax = ae_round(desiredy->ptr.p_double[0], _state); + mmax = 0; + for(j=1; j<=nclasses-1; j++) { - - /* - * classification-specific code - */ - k = ae_round(xy->ptr.pp_double[i][df->nvars], _state); - for(j=0; j<=df->nclasses-1; j++) + if( ae_fp_greater(y->ptr.p_double[j],y->ptr.p_double[mmax]) ) { - if( j==k ) - { - result = result+ae_fabs(y.ptr.p_double[j]-1, _state); - relcnt = relcnt+1; - } + mmax = j; } } + if( mmax!=rmax ) + { + buf->ptr.p_double[0] = buf->ptr.p_double[0]+1; + } + if( ae_fp_greater(y->ptr.p_double[rmax],(double)(0)) ) + { + buf->ptr.p_double[1] = buf->ptr.p_double[1]-ae_log(y->ptr.p_double[rmax], _state); + } else { - - /* - * regression-specific code - */ - if( ae_fp_neq(xy->ptr.pp_double[i][df->nvars],(double)(0)) ) + buf->ptr.p_double[1] = buf->ptr.p_double[1]+ae_log(ae_maxrealnumber, _state); + } + for(j=0; j<=nclasses-1; j++) + { + v = y->ptr.p_double[j]; + if( j==rmax ) { - result = result+ae_fabs((y.ptr.p_double[0]-xy->ptr.pp_double[i][df->nvars])/xy->ptr.pp_double[i][df->nvars], _state); - relcnt = relcnt+1; + ev = (double)(1); + } + else + { + ev = (double)(0); + } + buf->ptr.p_double[2] = buf->ptr.p_double[2]+ae_sqr(v-ev, _state); + buf->ptr.p_double[3] = buf->ptr.p_double[3]+ae_fabs(v-ev, _state); + if( ae_fp_neq(ev,(double)(0)) ) + { + buf->ptr.p_double[4] = buf->ptr.p_double[4]+ae_fabs((v-ev)/ev, _state); + buf->ptr.p_double[offs+2] = buf->ptr.p_double[offs+2]+1; } } + buf->ptr.p_double[offs+1] = buf->ptr.p_double[offs+1]+1; } - if( relcnt>0 ) + else { - result = result/relcnt; + + /* + * Regression + */ + nout = -nclasses; + rmax = 0; + for(j=1; j<=nout-1; j++) + { + if( ae_fp_greater(desiredy->ptr.p_double[j],desiredy->ptr.p_double[rmax]) ) + { + rmax = j; + } + } + mmax = 0; + for(j=1; j<=nout-1; j++) + { + if( ae_fp_greater(y->ptr.p_double[j],y->ptr.p_double[mmax]) ) + { + mmax = j; + } + } + if( mmax!=rmax ) + { + buf->ptr.p_double[0] = buf->ptr.p_double[0]+1; + } + for(j=0; j<=nout-1; j++) + { + v = y->ptr.p_double[j]; + ev = desiredy->ptr.p_double[j]; + buf->ptr.p_double[2] = buf->ptr.p_double[2]+ae_sqr(v-ev, _state); + buf->ptr.p_double[3] = buf->ptr.p_double[3]+ae_fabs(v-ev, _state); + if( ae_fp_neq(ev,(double)(0)) ) + { + buf->ptr.p_double[4] = buf->ptr.p_double[4]+ae_fabs((v-ev)/ev, _state); + 
buf->ptr.p_double[offs+2] = buf->ptr.p_double[offs+2]+1; + } + } + buf->ptr.p_double[offs+1] = buf->ptr.p_double[offs+1]+1; } - ae_frame_leave(_state); - return result; -} - - -/************************************************************************* -Copying of DecisionForest strucure - -INPUT PARAMETERS: - DF1 - original - -OUTPUT PARAMETERS: - DF2 - copy - - -- ALGLIB -- - Copyright 13.02.2009 by Bochkanov Sergey -*************************************************************************/ -void dfcopy(decisionforest* df1, decisionforest* df2, ae_state *_state) -{ - - _decisionforest_clear(df2); - - df2->nvars = df1->nvars; - df2->nclasses = df1->nclasses; - df2->ntrees = df1->ntrees; - df2->bufsize = df1->bufsize; - ae_vector_set_length(&df2->trees, df1->bufsize-1+1, _state); - ae_v_move(&df2->trees.ptr.p_double[0], 1, &df1->trees.ptr.p_double[0], 1, ae_v_len(0,df1->bufsize-1)); -} - - -/************************************************************************* -Serializer: allocation - - -- ALGLIB -- - Copyright 14.03.2011 by Bochkanov Sergey -*************************************************************************/ -void dfalloc(ae_serializer* s, decisionforest* forest, ae_state *_state) -{ - - - ae_serializer_alloc_entry(s); - ae_serializer_alloc_entry(s); - ae_serializer_alloc_entry(s); - ae_serializer_alloc_entry(s); - ae_serializer_alloc_entry(s); - ae_serializer_alloc_entry(s); - allocrealarray(s, &forest->trees, forest->bufsize, _state); } /************************************************************************* -Serializer: serialization +See DSErrAllocate for comments on this routine. -- ALGLIB -- - Copyright 14.03.2011 by Bochkanov Sergey + Copyright 11.01.2009 by Bochkanov Sergey *************************************************************************/ -void dfserialize(ae_serializer* s, - decisionforest* forest, - ae_state *_state) +void dserrfinish(/* Real */ ae_vector* buf, ae_state *_state) { + ae_int_t nout; + ae_int_t offs; - ae_serializer_serialize_int(s, getrdfserializationcode(_state), _state); - ae_serializer_serialize_int(s, dforest_dffirstversion, _state); - ae_serializer_serialize_int(s, forest->nvars, _state); - ae_serializer_serialize_int(s, forest->nclasses, _state); - ae_serializer_serialize_int(s, forest->ntrees, _state); - ae_serializer_serialize_int(s, forest->bufsize, _state); - serializerealarray(s, &forest->trees, forest->bufsize, _state); + offs = 5; + nout = ae_iabs(ae_round(buf->ptr.p_double[offs], _state), _state); + if( ae_fp_neq(buf->ptr.p_double[offs+1],(double)(0)) ) + { + buf->ptr.p_double[0] = buf->ptr.p_double[0]/buf->ptr.p_double[offs+1]; + buf->ptr.p_double[1] = buf->ptr.p_double[1]/buf->ptr.p_double[offs+1]; + buf->ptr.p_double[2] = ae_sqrt(buf->ptr.p_double[2]/(nout*buf->ptr.p_double[offs+1]), _state); + buf->ptr.p_double[3] = buf->ptr.p_double[3]/(nout*buf->ptr.p_double[offs+1]); + } + if( ae_fp_neq(buf->ptr.p_double[offs+2],(double)(0)) ) + { + buf->ptr.p_double[4] = buf->ptr.p_double[4]/buf->ptr.p_double[offs+2]; + } } /************************************************************************* -Serializer: unserialization -- ALGLIB -- - Copyright 14.03.2011 by Bochkanov Sergey + Copyright 19.05.2008 by Bochkanov Sergey *************************************************************************/ -void dfunserialize(ae_serializer* s, - decisionforest* forest, +void dsnormalize(/* Real */ ae_matrix* xy, + ae_int_t npoints, + ae_int_t nvars, + ae_int_t* info, + /* Real */ ae_vector* means, + /* Real */ ae_vector* sigmas, ae_state *_state) 
{ - ae_int_t i0; - ae_int_t i1; + ae_frame _frame_block; + ae_int_t i; + ae_int_t j; + ae_vector tmp; + double mean; + double variance; + double skewness; + double kurtosis; - _decisionforest_clear(forest); + ae_frame_make(_state, &_frame_block); + memset(&tmp, 0, sizeof(tmp)); + *info = 0; + ae_vector_clear(means); + ae_vector_clear(sigmas); + ae_vector_init(&tmp, 0, DT_REAL, _state, ae_true); /* - * check correctness of header + * Test parameters */ - ae_serializer_unserialize_int(s, &i0, _state); - ae_assert(i0==getrdfserializationcode(_state), "DFUnserialize: stream header corrupted", _state); - ae_serializer_unserialize_int(s, &i1, _state); - ae_assert(i1==dforest_dffirstversion, "DFUnserialize: stream header corrupted", _state); + if( npoints<=0||nvars<1 ) + { + *info = -1; + ae_frame_leave(_state); + return; + } + *info = 1; /* - * Unserialize data + * Standartization */ - ae_serializer_unserialize_int(s, &forest->nvars, _state); - ae_serializer_unserialize_int(s, &forest->nclasses, _state); - ae_serializer_unserialize_int(s, &forest->ntrees, _state); - ae_serializer_unserialize_int(s, &forest->bufsize, _state); - unserializerealarray(s, &forest->trees, _state); + ae_vector_set_length(means, nvars-1+1, _state); + ae_vector_set_length(sigmas, nvars-1+1, _state); + ae_vector_set_length(&tmp, npoints-1+1, _state); + for(j=0; j<=nvars-1; j++) + { + ae_v_move(&tmp.ptr.p_double[0], 1, &xy->ptr.pp_double[0][j], xy->stride, ae_v_len(0,npoints-1)); + samplemoments(&tmp, npoints, &mean, &variance, &skewness, &kurtosis, _state); + means->ptr.p_double[j] = mean; + sigmas->ptr.p_double[j] = ae_sqrt(variance, _state); + if( ae_fp_eq(sigmas->ptr.p_double[j],(double)(0)) ) + { + sigmas->ptr.p_double[j] = (double)(1); + } + for(i=0; i<=npoints-1; i++) + { + xy->ptr.pp_double[i][j] = (xy->ptr.pp_double[i][j]-means->ptr.p_double[j])/sigmas->ptr.p_double[j]; + } + } + ae_frame_leave(_state); } /************************************************************************* -Classification error + + -- ALGLIB -- + Copyright 19.05.2008 by Bochkanov Sergey *************************************************************************/ -static ae_int_t dforest_dfclserror(decisionforest* df, - /* Real */ ae_matrix* xy, +void dsnormalizec(/* Real */ ae_matrix* xy, ae_int_t npoints, + ae_int_t nvars, + ae_int_t* info, + /* Real */ ae_vector* means, + /* Real */ ae_vector* sigmas, + ae_state *_state) +{ + ae_frame _frame_block; + ae_int_t j; + ae_vector tmp; + double mean; + double variance; + double skewness; + double kurtosis; + + ae_frame_make(_state, &_frame_block); + memset(&tmp, 0, sizeof(tmp)); + *info = 0; + ae_vector_clear(means); + ae_vector_clear(sigmas); + ae_vector_init(&tmp, 0, DT_REAL, _state, ae_true); + + + /* + * Test parameters + */ + if( npoints<=0||nvars<1 ) + { + *info = -1; + ae_frame_leave(_state); + return; + } + *info = 1; + + /* + * Standartization + */ + ae_vector_set_length(means, nvars-1+1, _state); + ae_vector_set_length(sigmas, nvars-1+1, _state); + ae_vector_set_length(&tmp, npoints-1+1, _state); + for(j=0; j<=nvars-1; j++) + { + ae_v_move(&tmp.ptr.p_double[0], 1, &xy->ptr.pp_double[0][j], xy->stride, ae_v_len(0,npoints-1)); + samplemoments(&tmp, npoints, &mean, &variance, &skewness, &kurtosis, _state); + means->ptr.p_double[j] = mean; + sigmas->ptr.p_double[j] = ae_sqrt(variance, _state); + if( ae_fp_eq(sigmas->ptr.p_double[j],(double)(0)) ) + { + sigmas->ptr.p_double[j] = (double)(1); + } + } + ae_frame_leave(_state); +} + + 
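/*************************************************************************
Editor's note: the helper below is an illustrative sketch, not part of the
upstream diff. It assumes it is compiled in the same translation unit as
DSNormalize (so the ALGLIB core types are visible) and merely shows the
intended calling pattern; the name example_standardize is hypothetical.
DSNormalizeC, just above, computes the same Means/Sigmas without touching XY.
*************************************************************************/
static void example_standardize(/* Real */ ae_matrix* xy,
     ae_int_t npoints,
     ae_int_t nvars,
     ae_state *_state)
{
    ae_frame _frame_block;
    ae_vector means;
    ae_vector sigmas;
    ae_int_t info;

    ae_frame_make(_state, &_frame_block);
    memset(&means, 0, sizeof(means));
    memset(&sigmas, 0, sizeof(sigmas));
    ae_vector_init(&means, 0, DT_REAL, _state, ae_true);
    ae_vector_init(&sigmas, 0, DT_REAL, _state, ae_true);

    /*
     * After this call column J of XY holds (XY[I][J]-Means[J])/Sigmas[J];
     * columns with zero variance keep Sigmas[J]=1 and are only centered.
     */
    dsnormalize(xy, npoints, nvars, &info, &means, &sigmas, _state);
    ae_assert(info==1, "example_standardize: DSNormalize rejected its inputs", _state);
    ae_frame_leave(_state);
}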
+/************************************************************************* + + -- ALGLIB -- + Copyright 19.05.2008 by Bochkanov Sergey +*************************************************************************/ +double dsgetmeanmindistance(/* Real */ ae_matrix* xy, + ae_int_t npoints, + ae_int_t nvars, ae_state *_state) { ae_frame _frame_block; - ae_vector x; - ae_vector y; ae_int_t i; ae_int_t j; - ae_int_t k; - ae_int_t tmpi; - ae_int_t result; + ae_vector tmp; + ae_vector tmp2; + double v; + double result; ae_frame_make(_state, &_frame_block); - ae_vector_init(&x, 0, DT_REAL, _state); - ae_vector_init(&y, 0, DT_REAL, _state); + memset(&tmp, 0, sizeof(tmp)); + memset(&tmp2, 0, sizeof(tmp2)); + ae_vector_init(&tmp, 0, DT_REAL, _state, ae_true); + ae_vector_init(&tmp2, 0, DT_REAL, _state, ae_true); - if( df->nclasses<=1 ) + + /* + * Test parameters + */ + if( npoints<=0||nvars<1 ) { - result = 0; + result = (double)(0); ae_frame_leave(_state); return result; } - ae_vector_set_length(&x, df->nvars-1+1, _state); - ae_vector_set_length(&y, df->nclasses-1+1, _state); - result = 0; + + /* + * Process + */ + ae_vector_set_length(&tmp, npoints-1+1, _state); for(i=0; i<=npoints-1; i++) { - ae_v_move(&x.ptr.p_double[0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,df->nvars-1)); - dfprocess(df, &x, &y, _state); - k = ae_round(xy->ptr.pp_double[i][df->nvars], _state); - tmpi = 0; - for(j=1; j<=df->nclasses-1; j++) - { - if( ae_fp_greater(y.ptr.p_double[j],y.ptr.p_double[tmpi]) ) - { - tmpi = j; - } - } - if( tmpi!=k ) + tmp.ptr.p_double[i] = ae_maxrealnumber; + } + ae_vector_set_length(&tmp2, nvars-1+1, _state); + for(i=0; i<=npoints-1; i++) + { + for(j=i+1; j<=npoints-1; j++) { - result = result+1; + ae_v_move(&tmp2.ptr.p_double[0], 1, &xy->ptr.pp_double[i][0], 1, ae_v_len(0,nvars-1)); + ae_v_sub(&tmp2.ptr.p_double[0], 1, &xy->ptr.pp_double[j][0], 1, ae_v_len(0,nvars-1)); + v = ae_v_dotproduct(&tmp2.ptr.p_double[0], 1, &tmp2.ptr.p_double[0], 1, ae_v_len(0,nvars-1)); + v = ae_sqrt(v, _state); + tmp.ptr.p_double[i] = ae_minreal(tmp.ptr.p_double[i], v, _state); + tmp.ptr.p_double[j] = ae_minreal(tmp.ptr.p_double[j], v, _state); } } + result = (double)(0); + for(i=0; i<=npoints-1; i++) + { + result = result+tmp.ptr.p_double[i]/npoints; + } ae_frame_leave(_state); return result; } /************************************************************************* -Internal subroutine for processing one decision tree starting at Offs + + -- ALGLIB -- + Copyright 19.05.2008 by Bochkanov Sergey *************************************************************************/ -static void dforest_dfprocessinternal(decisionforest* df, - ae_int_t offs, - /* Real */ ae_vector* x, - /* Real */ ae_vector* y, +void dstie(/* Real */ ae_vector* a, + ae_int_t n, + /* Integer */ ae_vector* ties, + ae_int_t* tiecount, + /* Integer */ ae_vector* p1, + /* Integer */ ae_vector* p2, ae_state *_state) { + ae_frame _frame_block; + ae_int_t i; ae_int_t k; - ae_int_t idx; + ae_vector tmp; + ae_frame_make(_state, &_frame_block); + memset(&tmp, 0, sizeof(tmp)); + ae_vector_clear(ties); + *tiecount = 0; + ae_vector_clear(p1); + ae_vector_clear(p2); + ae_vector_init(&tmp, 0, DT_INT, _state, ae_true); /* - * Set pointer to the root + * Special case + */ + if( n<=0 ) + { + *tiecount = 0; + ae_frame_leave(_state); + return; + } + + /* + * Sort A */ - k = offs+1; + tagsort(a, n, p1, p2, _state); /* - * Navigate through the tree + * Process ties */ - for(;;) + *tiecount = 1; + for(i=1; i<=n-1; i++) { - if( 
ae_fp_eq(df->trees.ptr.p_double[k],(double)(-1)) ) - { - if( df->nclasses==1 ) - { - y->ptr.p_double[0] = y->ptr.p_double[0]+df->trees.ptr.p_double[k+1]; - } - else - { - idx = ae_round(df->trees.ptr.p_double[k+1], _state); - y->ptr.p_double[idx] = y->ptr.p_double[idx]+1; - } - break; - } - if( ae_fp_less(x->ptr.p_double[ae_round(df->trees.ptr.p_double[k], _state)],df->trees.ptr.p_double[k+1]) ) + if( ae_fp_neq(a->ptr.p_double[i],a->ptr.p_double[i-1]) ) { - k = k+dforest_innernodewidth; + *tiecount = *tiecount+1; } - else + } + ae_vector_set_length(ties, *tiecount+1, _state); + ties->ptr.p_int[0] = 0; + k = 1; + for(i=1; i<=n-1; i++) + { + if( ae_fp_neq(a->ptr.p_double[i],a->ptr.p_double[i-1]) ) { - k = offs+ae_round(df->trees.ptr.p_double[k+2], _state); + ties->ptr.p_int[k] = i; + k = k+1; } } + ties->ptr.p_int[*tiecount] = n; + ae_frame_leave(_state); } /************************************************************************* -Builds one decision tree. Just a wrapper for the DFBuildTreeRec. + + -- ALGLIB -- + Copyright 11.12.2008 by Bochkanov Sergey *************************************************************************/ -static void dforest_dfbuildtree(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, - ae_int_t nfeatures, - ae_int_t nvarsinpool, - ae_int_t flags, - dfinternalbuffers* bufs, - hqrndstate* rs, +void dstiefasti(/* Real */ ae_vector* a, + /* Integer */ ae_vector* b, + ae_int_t n, + /* Integer */ ae_vector* ties, + ae_int_t* tiecount, + /* Real */ ae_vector* bufr, + /* Integer */ ae_vector* bufi, ae_state *_state) { - ae_int_t numprocessed; + ae_frame _frame_block; ae_int_t i; + ae_int_t k; + ae_vector tmp; + ae_frame_make(_state, &_frame_block); + memset(&tmp, 0, sizeof(tmp)); + *tiecount = 0; + ae_vector_init(&tmp, 0, DT_INT, _state, ae_true); - ae_assert(npoints>0, "Assertion failed", _state); /* - * Prepare IdxBuf. It stores indices of the training set elements. - * When training set is being split, contents of IdxBuf is - * correspondingly reordered so we can know which elements belong - * to which branch of decision tree. + * Special case */ - for(i=0; i<=npoints-1; i++) + if( n<=0 ) { - bufs->idxbuf.ptr.p_int[i] = i; + *tiecount = 0; + ae_frame_leave(_state); + return; } /* - * Recursive procedure + * Sort A + */ + tagsortfasti(a, b, bufr, bufi, n, _state); + + /* + * Process ties */ - numprocessed = 1; - dforest_dfbuildtreerec(xy, npoints, nvars, nclasses, nfeatures, nvarsinpool, flags, &numprocessed, 0, npoints-1, bufs, rs, _state); - bufs->treebuf.ptr.p_double[0] = (double)(numprocessed); + ties->ptr.p_int[0] = 0; + k = 1; + for(i=1; i<=n-1; i++) + { + if( ae_fp_neq(a->ptr.p_double[i],a->ptr.p_double[i-1]) ) + { + ties->ptr.p_int[k] = i; + k = k+1; + } + } + ties->ptr.p_int[k] = n; + *tiecount = k; + ae_frame_leave(_state); } /************************************************************************* -Builds one decision tree (internal recursive subroutine) +Optimal binary classification + +Algorithms finds optimal (=with minimal cross-entropy) binary partition. +Internal subroutine. + +INPUT PARAMETERS: + A - array[0..N-1], variable + C - array[0..N-1], class numbers (0 or 1). + N - array size + +OUTPUT PARAMETERS: + Info - completetion code: + * -3, all values of A[] are same (partition is impossible) + * -2, one of C[] is incorrect (<0, >1) + * -1, incorrect pararemets were passed (N<=0). + * 1, OK + Threshold- partiton boundary. Left part contains values which are + strictly less than Threshold. 
Right part contains values + which are greater than or equal to Threshold. + PAL, PBL- probabilities P(0|v=Threshold) and P(1|v>=Threshold) + CVE - cross-validation estimate of cross-entropy -Parameters: - TreeBuf - large enough array, at least TreeSize - IdxBuf - at least NPoints elements - TmpBufR - at least NPoints - TmpBufR2 - at least NPoints - TmpBufI - at least NPoints - TmpBufI2 - at least NPoints+1 + -- ALGLIB -- + Copyright 22.05.2008 by Bochkanov Sergey *************************************************************************/ -static void dforest_dfbuildtreerec(/* Real */ ae_matrix* xy, - ae_int_t npoints, - ae_int_t nvars, - ae_int_t nclasses, - ae_int_t nfeatures, - ae_int_t nvarsinpool, - ae_int_t flags, - ae_int_t* numprocessed, - ae_int_t idx1, - ae_int_t idx2, - dfinternalbuffers* bufs, - hqrndstate* rs, +void dsoptimalsplit2(/* Real */ ae_vector* a, + /* Integer */ ae_vector* c, + ae_int_t n, + ae_int_t* info, + double* threshold, + double* pal, + double* pbl, + double* par, + double* pbr, + double* cve, ae_state *_state) { + ae_frame _frame_block; + ae_vector _a; + ae_vector _c; ae_int_t i; - ae_int_t j; - ae_int_t k; - ae_bool bflag; - ae_int_t i1; - ae_int_t i2; - ae_int_t info; - double sl; - double sr; - double w; - ae_int_t idxbest; - double ebest; - double tbest; - ae_int_t varcur; + ae_int_t t; double s; - double v; - double v1; - double v2; - double threshold; - ae_int_t oldnp; - double currms; - ae_bool useevs; + ae_vector ties; + ae_int_t tiecount; + ae_vector p1; + ae_vector p2; + ae_int_t k; + ae_int_t koptimal; + double pak; + double pbk; + double cvoptimal; + double cv; + ae_frame_make(_state, &_frame_block); + memset(&_a, 0, sizeof(_a)); + memset(&_c, 0, sizeof(_c)); + memset(&ties, 0, sizeof(ties)); + memset(&p1, 0, sizeof(p1)); + memset(&p2, 0, sizeof(p2)); + ae_vector_init_copy(&_a, a, _state, ae_true); + a = &_a; + ae_vector_init_copy(&_c, c, _state, ae_true); + c = &_c; + *info = 0; + *threshold = 0; + *pal = 0; + *pbl = 0; + *par = 0; + *pbr = 0; + *cve = 0; + ae_vector_init(&ties, 0, DT_INT, _state, ae_true); + ae_vector_init(&p1, 0, DT_INT, _state, ae_true); + ae_vector_init(&p2, 0, DT_INT, _state, ae_true); /* - * these initializers are not really necessary, - * but without them compiler complains about uninitialized locals + * Test for errors in inputs */ - tbest = (double)(0); + if( n<=0 ) + { + *info = -1; + ae_frame_leave(_state); + return; + } + for(i=0; i<=n-1; i++) + { + if( c->ptr.p_int[i]!=0&&c->ptr.p_int[i]!=1 ) + { + *info = -2; + ae_frame_leave(_state); + return; + } + } + *info = 1; /* - * Prepare + * Tie */ - ae_assert(npoints>0, "Assertion failed", _state); - ae_assert(idx2>=idx1, "Assertion failed", _state); - useevs = flags/dforest_dfuseevs%2!=0; + dstie(a, n, &ties, &tiecount, &p1, &p2, _state); + for(i=0; i<=n-1; i++) + { + if( p2.ptr.p_int[i]!=i ) + { + t = c->ptr.p_int[i]; + c->ptr.p_int[i] = c->ptr.p_int[p2.ptr.p_int[i]]; + c->ptr.p_int[p2.ptr.p_int[i]] = t; + } + } /* - * Leaf node + * Special case: number of ties is 1. + * + * NOTE: we assume that P[i,j] equals to 0 or 1, + * intermediate values are not allowed. */ - if( idx2==idx1 ) + if( tiecount==1 ) { - bufs->treebuf.ptr.p_double[*numprocessed] = (double)(-1); - bufs->treebuf.ptr.p_double[*numprocessed+1] = xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[idx1]][nvars]; - *numprocessed = *numprocessed+dforest_leafnodewidth; + *info = -3; + ae_frame_leave(_state); return; } /* - * Non-leaf node. - * Select random variable, prepare split: - * 1. 
prepare default solution - no splitting, class at random - * 2. investigate possible splits, compare with default/best + * General case, number of ties > 1 + * + * NOTE: we assume that P[i,j] equals to 0 or 1, + * intermediate values are not allowed. */ - idxbest = -1; - if( nclasses>1 ) - { - - /* - * default solution for classification - */ - for(i=0; i<=nclasses-1; i++) - { - bufs->classibuf.ptr.p_int[i] = 0; - } - s = (double)(idx2-idx1+1); - for(i=idx1; i<=idx2; i++) - { - j = ae_round(xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[i]][nvars], _state); - bufs->classibuf.ptr.p_int[j] = bufs->classibuf.ptr.p_int[j]+1; - } - ebest = (double)(0); - for(i=0; i<=nclasses-1; i++) - { - ebest = ebest+bufs->classibuf.ptr.p_int[i]*ae_sqr(1-bufs->classibuf.ptr.p_int[i]/s, _state)+(s-bufs->classibuf.ptr.p_int[i])*ae_sqr(bufs->classibuf.ptr.p_int[i]/s, _state); - } - ebest = ae_sqrt(ebest/(nclasses*(idx2-idx1+1)), _state); - } - else + *pal = (double)(0); + *pbl = (double)(0); + *par = (double)(0); + *pbr = (double)(0); + for(i=0; i<=n-1; i++) { - - /* - * default solution for regression - */ - v = (double)(0); - for(i=idx1; i<=idx2; i++) + if( c->ptr.p_int[i]==0 ) { - v = v+xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[i]][nvars]; + *par = *par+1; } - v = v/(idx2-idx1+1); - ebest = (double)(0); - for(i=idx1; i<=idx2; i++) + if( c->ptr.p_int[i]==1 ) { - ebest = ebest+ae_sqr(xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[i]][nvars]-v, _state); + *pbr = *pbr+1; } - ebest = ae_sqrt(ebest/(idx2-idx1+1), _state); } - i = 0; - while(i<=ae_minint(nfeatures, nvarsinpool, _state)-1) + koptimal = -1; + cvoptimal = ae_maxrealnumber; + for(k=0; k<=tiecount-2; k++) { /* - * select variables from pool - */ - j = i+hqrnduniformi(rs, nvarsinpool-i, _state); - k = bufs->varpool.ptr.p_int[i]; - bufs->varpool.ptr.p_int[i] = bufs->varpool.ptr.p_int[j]; - bufs->varpool.ptr.p_int[j] = k; - varcur = bufs->varpool.ptr.p_int[i]; - - /* - * load variable values to working array - * - * apply EVS preprocessing: if all variable values are same, - * variable is excluded from pool. - * - * This is necessary for binary pre-splits (see later) to work. + * first, obtain information about K-th tie which is + * moved from R-part to L-part */ - for(j=idx1; j<=idx2; j++) - { - bufs->tmpbufr.ptr.p_double[j-idx1] = xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[j]][varcur]; - } - if( useevs ) + pak = (double)(0); + pbk = (double)(0); + for(i=ties.ptr.p_int[k]; i<=ties.ptr.p_int[k+1]-1; i++) { - bflag = ae_false; - v = bufs->tmpbufr.ptr.p_double[0]; - for(j=0; j<=idx2-idx1; j++) + if( c->ptr.p_int[i]==0 ) { - if( ae_fp_neq(bufs->tmpbufr.ptr.p_double[j],v) ) - { - bflag = ae_true; - break; - } + pak = pak+1; } - if( !bflag ) + if( c->ptr.p_int[i]==1 ) { - - /* - * exclude variable from pool, - * go to the next iteration. - * I is not increased. 
- */ - k = bufs->varpool.ptr.p_int[i]; - bufs->varpool.ptr.p_int[i] = bufs->varpool.ptr.p_int[nvarsinpool-1]; - bufs->varpool.ptr.p_int[nvarsinpool-1] = k; - nvarsinpool = nvarsinpool-1; - continue; + pbk = pbk+1; } } /* - * load labels to working array + * Calculate cross-validation CE */ - if( nclasses>1 ) - { - for(j=idx1; j<=idx2; j++) - { - bufs->tmpbufi.ptr.p_int[j-idx1] = ae_round(xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[j]][nvars], _state); - } - } - else + cv = (double)(0); + cv = cv-bdss_xlny(*pal+pak, (*pal+pak)/(*pal+pak+(*pbl)+pbk+1), _state); + cv = cv-bdss_xlny(*pbl+pbk, (*pbl+pbk)/(*pal+pak+1+(*pbl)+pbk), _state); + cv = cv-bdss_xlny(*par-pak, (*par-pak)/(*par-pak+(*pbr)-pbk+1), _state); + cv = cv-bdss_xlny(*pbr-pbk, (*pbr-pbk)/(*par-pak+1+(*pbr)-pbk), _state); + + /* + * Compare with best + */ + if( ae_fp_less(cv,cvoptimal) ) { - for(j=idx1; j<=idx2; j++) - { - bufs->tmpbufr2.ptr.p_double[j-idx1] = xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[j]][nvars]; - } + cvoptimal = cv; + koptimal = k; } /* - * calculate split + * update */ - if( useevs&&bufs->evsbin.ptr.p_bool[varcur] ) + *pal = *pal+pak; + *pbl = *pbl+pbk; + *par = *par-pak; + *pbr = *pbr-pbk; + } + *cve = cvoptimal; + *threshold = 0.5*(a->ptr.p_double[ties.ptr.p_int[koptimal]]+a->ptr.p_double[ties.ptr.p_int[koptimal+1]]); + *pal = (double)(0); + *pbl = (double)(0); + *par = (double)(0); + *pbr = (double)(0); + for(i=0; i<=n-1; i++) + { + if( ae_fp_less(a->ptr.p_double[i],*threshold) ) { - - /* - * Pre-calculated splits for binary variables. - * Threshold is already known, just calculate RMS error - */ - threshold = bufs->evssplits.ptr.p_double[varcur]; - if( nclasses>1 ) + if( c->ptr.p_int[i]==0 ) { - - /* - * classification-specific code - */ - for(j=0; j<=2*nclasses-1; j++) - { - bufs->classibuf.ptr.p_int[j] = 0; - } - sl = (double)(0); - sr = (double)(0); - for(j=0; j<=idx2-idx1; j++) - { - k = bufs->tmpbufi.ptr.p_int[j]; - if( ae_fp_less(bufs->tmpbufr.ptr.p_double[j],threshold) ) - { - bufs->classibuf.ptr.p_int[k] = bufs->classibuf.ptr.p_int[k]+1; - sl = sl+1; - } - else - { - bufs->classibuf.ptr.p_int[k+nclasses] = bufs->classibuf.ptr.p_int[k+nclasses]+1; - sr = sr+1; - } - } - ae_assert(ae_fp_neq(sl,(double)(0))&&ae_fp_neq(sr,(double)(0)), "DFBuildTreeRec: something strange!", _state); - currms = (double)(0); - for(j=0; j<=nclasses-1; j++) - { - w = (double)(bufs->classibuf.ptr.p_int[j]); - currms = currms+w*ae_sqr(w/sl-1, _state); - currms = currms+(sl-w)*ae_sqr(w/sl, _state); - w = (double)(bufs->classibuf.ptr.p_int[nclasses+j]); - currms = currms+w*ae_sqr(w/sr-1, _state); - currms = currms+(sr-w)*ae_sqr(w/sr, _state); - } - currms = ae_sqrt(currms/(nclasses*(idx2-idx1+1)), _state); + *pal = *pal+1; } else { - - /* - * regression-specific code - */ - sl = (double)(0); - sr = (double)(0); - v1 = (double)(0); - v2 = (double)(0); - for(j=0; j<=idx2-idx1; j++) - { - if( ae_fp_less(bufs->tmpbufr.ptr.p_double[j],threshold) ) - { - v1 = v1+bufs->tmpbufr2.ptr.p_double[j]; - sl = sl+1; - } - else - { - v2 = v2+bufs->tmpbufr2.ptr.p_double[j]; - sr = sr+1; - } - } - ae_assert(ae_fp_neq(sl,(double)(0))&&ae_fp_neq(sr,(double)(0)), "DFBuildTreeRec: something strange!", _state); - v1 = v1/sl; - v2 = v2/sr; - currms = (double)(0); - for(j=0; j<=idx2-idx1; j++) - { - if( ae_fp_less(bufs->tmpbufr.ptr.p_double[j],threshold) ) - { - currms = currms+ae_sqr(v1-bufs->tmpbufr2.ptr.p_double[j], _state); - } - else - { - currms = currms+ae_sqr(v2-bufs->tmpbufr2.ptr.p_double[j], _state); - } - } - currms = ae_sqrt(currms/(idx2-idx1+1), 
_state); + *pbl = *pbl+1; } - info = 1; } else { - - /* - * Generic splits - */ - if( nclasses>1 ) + if( c->ptr.p_int[i]==0 ) { - dforest_dfsplitc(&bufs->tmpbufr, &bufs->tmpbufi, &bufs->classibuf, idx2-idx1+1, nclasses, dforest_dfusestrongsplits, &info, &threshold, &currms, &bufs->sortrbuf, &bufs->sortibuf, _state); + *par = *par+1; } else { - dforest_dfsplitr(&bufs->tmpbufr, &bufs->tmpbufr2, idx2-idx1+1, dforest_dfusestrongsplits, &info, &threshold, &currms, &bufs->sortrbuf, &bufs->sortrbuf2, _state); - } - } - if( info>0 ) - { - if( ae_fp_less_eq(currms,ebest) ) - { - ebest = currms; - idxbest = varcur; - tbest = threshold; + *pbr = *pbr+1; } } - - /* - * Next iteration - */ - i = i+1; - } - - /* - * to split or not to split - */ - if( idxbest<0 ) - { - - /* - * All values are same, cannot split. - */ - bufs->treebuf.ptr.p_double[*numprocessed] = (double)(-1); - if( nclasses>1 ) - { - - /* - * Select random class label (randomness allows us to - * approximate distribution of the classes) - */ - bufs->treebuf.ptr.p_double[*numprocessed+1] = (double)(ae_round(xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[idx1+hqrnduniformi(rs, idx2-idx1+1, _state)]][nvars], _state)); - } - else - { - - /* - * Select average (for regression task). - */ - v = (double)(0); - for(i=idx1; i<=idx2; i++) - { - v = v+xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[i]][nvars]/(idx2-idx1+1); - } - bufs->treebuf.ptr.p_double[*numprocessed+1] = v; - } - *numprocessed = *numprocessed+dforest_leafnodewidth; - } - else - { - - /* - * we can split - */ - bufs->treebuf.ptr.p_double[*numprocessed] = (double)(idxbest); - bufs->treebuf.ptr.p_double[*numprocessed+1] = tbest; - i1 = idx1; - i2 = idx2; - while(i1<=i2) - { - - /* - * Reorder indices so that left partition is in [Idx1..I1-1], - * and right partition is in [I2+1..Idx2] - */ - if( ae_fp_less(xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[i1]][idxbest],tbest) ) - { - i1 = i1+1; - continue; - } - if( ae_fp_greater_eq(xy->ptr.pp_double[bufs->idxbuf.ptr.p_int[i2]][idxbest],tbest) ) - { - i2 = i2-1; - continue; - } - j = bufs->idxbuf.ptr.p_int[i1]; - bufs->idxbuf.ptr.p_int[i1] = bufs->idxbuf.ptr.p_int[i2]; - bufs->idxbuf.ptr.p_int[i2] = j; - i1 = i1+1; - i2 = i2-1; - } - oldnp = *numprocessed; - *numprocessed = *numprocessed+dforest_innernodewidth; - dforest_dfbuildtreerec(xy, npoints, nvars, nclasses, nfeatures, nvarsinpool, flags, numprocessed, idx1, i1-1, bufs, rs, _state); - bufs->treebuf.ptr.p_double[oldnp+2] = (double)(*numprocessed); - dforest_dfbuildtreerec(xy, npoints, nvars, nclasses, nfeatures, nvarsinpool, flags, numprocessed, i2+1, idx2, bufs, rs, _state); } + s = *pal+(*pbl); + *pal = *pal/s; + *pbl = *pbl/s; + s = *par+(*pbr); + *par = *par/s; + *pbr = *pbr/s; + ae_frame_leave(_state); } /************************************************************************* -Makes split on attribute +Optimal partition, internal subroutine. Fast version. + +Accepts: + A array[0..N-1] array of attributes array[0..N-1] + C array[0..N-1] array of class labels + TiesBuf array[0..N] temporaries (ties) + CntBuf array[0..2*NC-1] temporaries (counts) + Alpha centering factor (0<=alpha<=1, recommended value - 0.05) + BufR array[0..N-1] temporaries + BufI array[0..N-1] temporaries + +Output: + Info error code (">0"=OK, "<0"=bad) + RMS training set RMS error + CVRMS leave-one-out RMS error + +Note: + content of all arrays is changed by subroutine; + it doesn't allocate temporaries. 
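(Editor's note: the call sequence below is an illustrative sketch, not part of
the upstream diff; the variable names are hypothetical.)  Because the routine
allocates nothing itself, a typical caller sizes the temporaries as documented
above before invoking it:

    ae_vector_set_length(&tiesbuf, n+1, _state);    /* Integer, array[0..N]      */
    ae_vector_set_length(&cntbuf, 2*nc, _state);    /* Integer, array[0..2*NC-1] */
    ae_vector_set_length(&bufr, n, _state);         /* Real,    array[0..N-1]    */
    ae_vector_set_length(&bufi, n, _state);         /* Integer, array[0..N-1]    */
    dsoptimalsplit2fast(&a, &c, &tiesbuf, &cntbuf, &bufr, &bufi,
        n, nc, 0.05, &info, &threshold, &rms, &cvrms, _state);

A and C must already hold the attribute values and class labels; as noted
above, both are reordered in place.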
+ + -- ALGLIB -- + Copyright 11.12.2008 by Bochkanov Sergey *************************************************************************/ -static void dforest_dfsplitc(/* Real */ ae_vector* x, +void dsoptimalsplit2fast(/* Real */ ae_vector* a, /* Integer */ ae_vector* c, + /* Integer */ ae_vector* tiesbuf, /* Integer */ ae_vector* cntbuf, + /* Real */ ae_vector* bufr, + /* Integer */ ae_vector* bufi, ae_int_t n, ae_int_t nc, - ae_int_t flags, + double alpha, ae_int_t* info, double* threshold, - double* e, - /* Real */ ae_vector* sortrbuf, - /* Integer */ ae_vector* sortibuf, + double* rms, + double* cvrms, ae_state *_state) { ae_int_t i; - ae_int_t neq; - ae_int_t nless; - ae_int_t ngreater; - ae_int_t q; - ae_int_t qmin; - ae_int_t qmax; - ae_int_t qcnt; - double cursplit; - ae_int_t nleft; + ae_int_t k; + ae_int_t cl; + ae_int_t tiecount; + double cbest; + double cc; + ae_int_t koptimal; + ae_int_t sl; + ae_int_t sr; double v; - double cure; double w; - double sl; - double sr; + double x; *info = 0; *threshold = 0; - *e = 0; + *rms = 0; + *cvrms = 0; - tagsortfasti(x, c, sortrbuf, sortibuf, n, _state); - *e = ae_maxrealnumber; - *threshold = 0.5*(x->ptr.p_double[0]+x->ptr.p_double[n-1]); - *info = -3; - if( flags/dforest_dfusestrongsplits%2==0 ) + + /* + * Test for errors in inputs + */ + if( n<=0||nc<2 ) + { + *info = -1; + return; + } + for(i=0; i<=n-1; i++) + { + if( c->ptr.p_int[i]<0||c->ptr.p_int[i]>=nc ) + { + *info = -2; + return; + } + } + *info = 1; + + /* + * Tie + */ + dstiefasti(a, c, n, tiesbuf, &tiecount, bufr, bufi, _state); + + /* + * Special case: number of ties is 1. + */ + if( tiecount==1 ) + { + *info = -3; + return; + } + + /* + * General case, number of ties > 1 + */ + for(i=0; i<=2*nc-1; i++) + { + cntbuf->ptr.p_int[i] = 0; + } + for(i=0; i<=n-1; i++) + { + cntbuf->ptr.p_int[nc+c->ptr.p_int[i]] = cntbuf->ptr.p_int[nc+c->ptr.p_int[i]]+1; + } + koptimal = -1; + *threshold = a->ptr.p_double[n-1]; + cbest = ae_maxrealnumber; + sl = 0; + sr = n; + for(k=0; k<=tiecount-2; k++) { /* - * weak splits, split at half + * first, move Kth tie from right to left */ - qcnt = 2; - qmin = 1; - qmax = 1; - } - else - { + for(i=tiesbuf->ptr.p_int[k]; i<=tiesbuf->ptr.p_int[k+1]-1; i++) + { + cl = c->ptr.p_int[i]; + cntbuf->ptr.p_int[cl] = cntbuf->ptr.p_int[cl]+1; + cntbuf->ptr.p_int[nc+cl] = cntbuf->ptr.p_int[nc+cl]-1; + } + sl = sl+(tiesbuf->ptr.p_int[k+1]-tiesbuf->ptr.p_int[k]); + sr = sr-(tiesbuf->ptr.p_int[k+1]-tiesbuf->ptr.p_int[k]); /* - * strong splits: choose best quartile + * Calculate RMS error */ - qcnt = 4; - qmin = 1; - qmax = 3; - } - for(q=qmin; q<=qmax; q++) - { - cursplit = x->ptr.p_double[n*q/qcnt]; - neq = 0; - nless = 0; - ngreater = 0; - for(i=0; i<=n-1; i++) + v = (double)(0); + for(i=0; i<=nc-1; i++) { - if( ae_fp_less(x->ptr.p_double[i],cursplit) ) - { - nless = nless+1; - } - if( ae_fp_eq(x->ptr.p_double[i],cursplit) ) - { - neq = neq+1; - } - if( ae_fp_greater(x->ptr.p_double[i],cursplit) ) - { - ngreater = ngreater+1; - } + w = (double)(cntbuf->ptr.p_int[i]); + v = v+w*ae_sqr(w/sl-1, _state); + v = v+(sl-w)*ae_sqr(w/sl, _state); + w = (double)(cntbuf->ptr.p_int[nc+i]); + v = v+w*ae_sqr(w/sr-1, _state); + v = v+(sr-w)*ae_sqr(w/sr, _state); } - ae_assert(neq!=0, "DFSplitR: NEq=0, something strange!!!", _state); - if( nless!=0||ngreater!=0 ) + v = ae_sqrt(v/(nc*n), _state); + + /* + * Compare with best + */ + x = (double)(2*sl)/(double)(sl+sr)-1; + cc = v*(1-alpha+alpha*ae_sqr(x, _state)); + if( ae_fp_less(cc,cbest) ) { /* - * set threshold between two partitions, 
with - * some tweaking to avoid problems with floating point - * arithmetics. - * - * The problem is that when you calculates C = 0.5*(A+B) there - * can be no C which lies strictly between A and B (for example, - * there is no floating point number which is - * greater than 1 and less than 1+eps). In such situations - * we choose right side as theshold (remember that - * points which lie on threshold falls to the right side). - */ - if( nlessptr.p_double[nless+neq-1]+x->ptr.p_double[nless+neq]); - nleft = nless+neq; - if( ae_fp_less_eq(cursplit,x->ptr.p_double[nless+neq-1]) ) + * store split + */ + *rms = v; + koptimal = k; + cbest = cc; + + /* + * calculate CVRMS error + */ + *cvrms = (double)(0); + for(i=0; i<=nc-1; i++) + { + if( sl>1 ) { - cursplit = x->ptr.p_double[nless+neq]; + w = (double)(cntbuf->ptr.p_int[i]); + *cvrms = *cvrms+w*ae_sqr((w-1)/(sl-1)-1, _state); + *cvrms = *cvrms+(sl-w)*ae_sqr(w/(sl-1), _state); } - } - else - { - cursplit = 0.5*(x->ptr.p_double[nless-1]+x->ptr.p_double[nless]); - nleft = nless; - if( ae_fp_less_eq(cursplit,x->ptr.p_double[nless-1]) ) + else { - cursplit = x->ptr.p_double[nless]; + w = (double)(cntbuf->ptr.p_int[i]); + *cvrms = *cvrms+w*ae_sqr((double)1/(double)nc-1, _state); + *cvrms = *cvrms+(sl-w)*ae_sqr((double)1/(double)nc, _state); + } + if( sr>1 ) + { + w = (double)(cntbuf->ptr.p_int[nc+i]); + *cvrms = *cvrms+w*ae_sqr((w-1)/(sr-1)-1, _state); + *cvrms = *cvrms+(sr-w)*ae_sqr(w/(sr-1), _state); + } + else + { + w = (double)(cntbuf->ptr.p_int[nc+i]); + *cvrms = *cvrms+w*ae_sqr((double)1/(double)nc-1, _state); + *cvrms = *cvrms+(sr-w)*ae_sqr((double)1/(double)nc, _state); } } - *info = 1; - cure = (double)(0); - for(i=0; i<=2*nc-1; i++) - { - cntbuf->ptr.p_int[i] = 0; - } - for(i=0; i<=nleft-1; i++) - { - cntbuf->ptr.p_int[c->ptr.p_int[i]] = cntbuf->ptr.p_int[c->ptr.p_int[i]]+1; - } - for(i=nleft; i<=n-1; i++) - { - cntbuf->ptr.p_int[nc+c->ptr.p_int[i]] = cntbuf->ptr.p_int[nc+c->ptr.p_int[i]]+1; - } - sl = (double)(nleft); - sr = (double)(n-nleft); - v = (double)(0); - for(i=0; i<=nc-1; i++) - { - w = (double)(cntbuf->ptr.p_int[i]); - v = v+w*ae_sqr(w/sl-1, _state); - v = v+(sl-w)*ae_sqr(w/sl, _state); - w = (double)(cntbuf->ptr.p_int[nc+i]); - v = v+w*ae_sqr(w/sr-1, _state); - v = v+(sr-w)*ae_sqr(w/sr, _state); - } - cure = ae_sqrt(v/(nc*n), _state); - if( ae_fp_less(cure,*e) ) - { - *threshold = cursplit; - *e = cure; - } + *cvrms = ae_sqrt(*cvrms/(nc*n), _state); } } + + /* + * Calculate threshold. + * Code is a bit complicated because there can be such + * numbers that 0.5(A+B) equals to A or B (if A-B=epsilon) + */ + *threshold = 0.5*(a->ptr.p_double[tiesbuf->ptr.p_int[koptimal]]+a->ptr.p_double[tiesbuf->ptr.p_int[koptimal+1]]); + if( ae_fp_less_eq(*threshold,a->ptr.p_double[tiesbuf->ptr.p_int[koptimal]]) ) + { + *threshold = a->ptr.p_double[tiesbuf->ptr.p_int[koptimal+1]]; + } } /************************************************************************* -Makes split on attribute +Automatic non-optimal discretization, internal subroutine. 
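(Editor's note, added for clarity; not part of the upstream diff.)  On success
the routine returns NI intervals (2<=NI<=KMax) described by NI-1 thresholds.
Each Thresholds[I] is placed halfway between the largest attribute value
assigned to interval I and the smallest value assigned to interval I+1, so a
new sample V can be binned by scanning the thresholds in increasing order
(with purely hypothetical numbers: for NI=3 and Thresholds={2.5, 7.0}, V=1
falls into interval 0, V=4 into interval 1 and V=9 into interval 2). CVE is
the sum of the per-interval contributions computed by BDSS_GetCV (a
cross-validation style estimate of cross-entropy).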
+ + -- ALGLIB -- + Copyright 22.05.2008 by Bochkanov Sergey *************************************************************************/ -static void dforest_dfsplitr(/* Real */ ae_vector* x, - /* Real */ ae_vector* y, +void dssplitk(/* Real */ ae_vector* a, + /* Integer */ ae_vector* c, ae_int_t n, - ae_int_t flags, + ae_int_t nc, + ae_int_t kmax, ae_int_t* info, - double* threshold, - double* e, - /* Real */ ae_vector* sortrbuf, - /* Real */ ae_vector* sortrbuf2, + /* Real */ ae_vector* thresholds, + ae_int_t* ni, + double* cve, ae_state *_state) { + ae_frame _frame_block; + ae_vector _a; + ae_vector _c; ae_int_t i; - ae_int_t neq; - ae_int_t nless; - ae_int_t ngreater; - ae_int_t q; - ae_int_t qmin; - ae_int_t qmax; - ae_int_t qcnt; - double cursplit; - ae_int_t nleft; - double v; - double cure; + ae_int_t j; + ae_int_t j1; + ae_int_t k; + ae_vector ties; + ae_int_t tiecount; + ae_vector p1; + ae_vector p2; + ae_vector cnt; + double v2; + ae_int_t bestk; + double bestcve; + ae_vector bestsizes; + double curcve; + ae_vector cursizes; + ae_frame_make(_state, &_frame_block); + memset(&_a, 0, sizeof(_a)); + memset(&_c, 0, sizeof(_c)); + memset(&ties, 0, sizeof(ties)); + memset(&p1, 0, sizeof(p1)); + memset(&p2, 0, sizeof(p2)); + memset(&cnt, 0, sizeof(cnt)); + memset(&bestsizes, 0, sizeof(bestsizes)); + memset(&cursizes, 0, sizeof(cursizes)); + ae_vector_init_copy(&_a, a, _state, ae_true); + a = &_a; + ae_vector_init_copy(&_c, c, _state, ae_true); + c = &_c; *info = 0; - *threshold = 0; - *e = 0; + ae_vector_clear(thresholds); + *ni = 0; + *cve = 0; + ae_vector_init(&ties, 0, DT_INT, _state, ae_true); + ae_vector_init(&p1, 0, DT_INT, _state, ae_true); + ae_vector_init(&p2, 0, DT_INT, _state, ae_true); + ae_vector_init(&cnt, 0, DT_INT, _state, ae_true); + ae_vector_init(&bestsizes, 0, DT_INT, _state, ae_true); + ae_vector_init(&cursizes, 0, DT_INT, _state, ae_true); - tagsortfastr(x, y, sortrbuf, sortrbuf2, n, _state); - *e = ae_maxrealnumber; - *threshold = 0.5*(x->ptr.p_double[0]+x->ptr.p_double[n-1]); - *info = -3; - if( flags/dforest_dfusestrongsplits%2==0 ) + + /* + * Test for errors in inputs + */ + if( (n<=0||nc<2)||kmax<2 ) { - - /* - * weak splits, split at half - */ - qcnt = 2; - qmin = 1; - qmax = 1; + *info = -1; + ae_frame_leave(_state); + return; } - else + for(i=0; i<=n-1; i++) + { + if( c->ptr.p_int[i]<0||c->ptr.p_int[i]>=nc ) + { + *info = -2; + ae_frame_leave(_state); + return; + } + } + *info = 1; + + /* + * Tie + */ + dstie(a, n, &ties, &tiecount, &p1, &p2, _state); + for(i=0; i<=n-1; i++) + { + if( p2.ptr.p_int[i]!=i ) + { + k = c->ptr.p_int[i]; + c->ptr.p_int[i] = c->ptr.p_int[p2.ptr.p_int[i]]; + c->ptr.p_int[p2.ptr.p_int[i]] = k; + } + } + + /* + * Special cases + */ + if( tiecount==1 ) + { + *info = -3; + ae_frame_leave(_state); + return; + } + + /* + * General case: + * 0. allocate arrays + */ + kmax = ae_minint(kmax, tiecount, _state); + ae_vector_set_length(&bestsizes, kmax-1+1, _state); + ae_vector_set_length(&cursizes, kmax-1+1, _state); + ae_vector_set_length(&cnt, nc-1+1, _state); + + /* + * General case: + * 1. 
prepare "weak" solution (two subintervals, divided at median) + */ + v2 = ae_maxrealnumber; + j = -1; + for(i=1; i<=tiecount-1; i++) + { + if( ae_fp_less(ae_fabs(ties.ptr.p_int[i]-0.5*(n-1), _state),v2) ) + { + v2 = ae_fabs(ties.ptr.p_int[i]-0.5*n, _state); + j = i; + } + } + ae_assert(j>0, "DSSplitK: internal error #1!", _state); + bestk = 2; + bestsizes.ptr.p_int[0] = ties.ptr.p_int[j]; + bestsizes.ptr.p_int[1] = n-j; + bestcve = (double)(0); + for(i=0; i<=nc-1; i++) + { + cnt.ptr.p_int[i] = 0; + } + for(i=0; i<=j-1; i++) + { + bdss_tieaddc(c, &ties, i, nc, &cnt, _state); + } + bestcve = bestcve+bdss_getcv(&cnt, nc, _state); + for(i=0; i<=nc-1; i++) + { + cnt.ptr.p_int[i] = 0; + } + for(i=j; i<=tiecount-1; i++) + { + bdss_tieaddc(c, &ties, i, nc, &cnt, _state); + } + bestcve = bestcve+bdss_getcv(&cnt, nc, _state); + + /* + * General case: + * 2. Use greedy algorithm to find sub-optimal split in O(KMax*N) time + */ + for(k=2; k<=kmax; k++) { /* - * strong splits: choose best quartile + * Prepare greedy K-interval split */ - qcnt = 4; - qmin = 1; - qmax = 3; - } - for(q=qmin; q<=qmax; q++) - { - cursplit = x->ptr.p_double[n*q/qcnt]; - neq = 0; - nless = 0; - ngreater = 0; - for(i=0; i<=n-1; i++) + for(i=0; i<=k-1; i++) { - if( ae_fp_less(x->ptr.p_double[i],cursplit) ) - { - nless = nless+1; - } - if( ae_fp_eq(x->ptr.p_double[i],cursplit) ) + cursizes.ptr.p_int[i] = 0; + } + i = 0; + j = 0; + while(j<=tiecount-1&&i<=k-1) + { + + /* + * Rule: I-th bin is empty, fill it + */ + if( cursizes.ptr.p_int[i]==0 ) { - neq = neq+1; + cursizes.ptr.p_int[i] = ties.ptr.p_int[j+1]-ties.ptr.p_int[j]; + j = j+1; + continue; } - if( ae_fp_greater(x->ptr.p_double[i],cursplit) ) + + /* + * Rule: (K-1-I) bins left, (K-1-I) ties left (1 tie per bin); next bin + */ + if( tiecount-j==k-1-i ) { - ngreater = ngreater+1; + i = i+1; + continue; } - } - ae_assert(neq!=0, "DFSplitR: NEq=0, something strange!!!", _state); - if( nless!=0||ngreater!=0 ) - { /* - * set threshold between two partitions, with - * some tweaking to avoid problems with floating point - * arithmetics. - * - * The problem is that when you calculates C = 0.5*(A+B) there - * can be no C which lies strictly between A and B (for example, - * there is no floating point number which is - * greater than 1 and less than 1+eps). In such situations - * we choose right side as theshold (remember that - * points which lie on threshold falls to the right side). - */ - if( nlessptr.p_double[nless+neq-1]+x->ptr.p_double[nless+neq]); - nleft = nless+neq; - if( ae_fp_less_eq(cursplit,x->ptr.p_double[nless+neq-1]) ) - { - cursplit = x->ptr.p_double[nless+neq]; - } - } - else + * Rule: last bin, always place in current + */ + if( i==k-1 ) { - cursplit = 0.5*(x->ptr.p_double[nless-1]+x->ptr.p_double[nless]); - nleft = nless; - if( ae_fp_less_eq(cursplit,x->ptr.p_double[nless-1]) ) - { - cursplit = x->ptr.p_double[nless]; - } + cursizes.ptr.p_int[i] = cursizes.ptr.p_int[i]+ties.ptr.p_int[j+1]-ties.ptr.p_int[j]; + j = j+1; + continue; } - *info = 1; - cure = (double)(0); - v = (double)(0); - for(i=0; i<=nleft-1; i++) + + /* + * Place J-th tie in I-th bin, or leave for I+1-th bin. 
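 * (Editor's note, added for clarity.) The target size of every bin is N/K
 * elements. The tie block [Ties[J]..Ties[J+1]-1] is appended to bin I only
 * if that brings |size(I)-N/K| closer to zero than leaving bin I unchanged;
 * otherwise the block is deferred to bin I+1. For example, with N=10, K=2
 * (target size 5), size(I)=4 and a tie block of length 3, the block is
 * deferred, since |4+3-5|=2 is not smaller than |4-5|=1.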
+ */ + if( ae_fp_less(ae_fabs(cursizes.ptr.p_int[i]+ties.ptr.p_int[j+1]-ties.ptr.p_int[j]-(double)n/(double)k, _state),ae_fabs(cursizes.ptr.p_int[i]-(double)n/(double)k, _state)) ) { - v = v+y->ptr.p_double[i]; + cursizes.ptr.p_int[i] = cursizes.ptr.p_int[i]+ties.ptr.p_int[j+1]-ties.ptr.p_int[j]; + j = j+1; } - v = v/nleft; - for(i=0; i<=nleft-1; i++) + else { - cure = cure+ae_sqr(y->ptr.p_double[i]-v, _state); + i = i+1; } - v = (double)(0); - for(i=nleft; i<=n-1; i++) + } + ae_assert(cursizes.ptr.p_int[k-1]!=0&&j==tiecount, "DSSplitK: internal error #1", _state); + + /* + * Calculate CVE + */ + curcve = (double)(0); + j = 0; + for(i=0; i<=k-1; i++) + { + for(j1=0; j1<=nc-1; j1++) { - v = v+y->ptr.p_double[i]; + cnt.ptr.p_int[j1] = 0; } - v = v/(n-nleft); - for(i=nleft; i<=n-1; i++) + for(j1=j; j1<=j+cursizes.ptr.p_int[i]-1; j1++) { - cure = cure+ae_sqr(y->ptr.p_double[i]-v, _state); + cnt.ptr.p_int[c->ptr.p_int[j1]] = cnt.ptr.p_int[c->ptr.p_int[j1]]+1; } - cure = ae_sqrt(cure/n, _state); - if( ae_fp_less(cure,*e) ) + curcve = curcve+bdss_getcv(&cnt, nc, _state); + j = j+cursizes.ptr.p_int[i]; + } + + /* + * Choose best variant + */ + if( ae_fp_less(curcve,bestcve) ) + { + for(i=0; i<=k-1; i++) { - *threshold = cursplit; - *e = cure; + bestsizes.ptr.p_int[i] = cursizes.ptr.p_int[i]; } + bestcve = curcve; + bestk = k; } } + + /* + * Transform from sizes to thresholds + */ + *cve = bestcve; + *ni = bestk; + ae_vector_set_length(thresholds, *ni-2+1, _state); + j = bestsizes.ptr.p_int[0]; + for(i=1; i<=bestk-1; i++) + { + thresholds->ptr.p_double[i-1] = 0.5*(a->ptr.p_double[j-1]+a->ptr.p_double[j]); + j = j+bestsizes.ptr.p_int[i]; + } + ae_frame_leave(_state); } -void _decisionforest_init(void* _p, ae_state *_state) -{ - decisionforest *p = (decisionforest*)_p; - ae_touch_ptr((void*)p); - ae_vector_init(&p->trees, 0, DT_REAL, _state); -} - - -void _decisionforest_init_copy(void* _dst, void* _src, ae_state *_state) -{ - decisionforest *dst = (decisionforest*)_dst; - decisionforest *src = (decisionforest*)_src; - dst->nvars = src->nvars; - dst->nclasses = src->nclasses; - dst->ntrees = src->ntrees; - dst->bufsize = src->bufsize; - ae_vector_init_copy(&dst->trees, &src->trees, _state); -} - +/************************************************************************* +Automatic optimal discretization, internal subroutine. 
-void _decisionforest_clear(void* _p) + -- ALGLIB -- + Copyright 22.05.2008 by Bochkanov Sergey +*************************************************************************/ +void dsoptimalsplitk(/* Real */ ae_vector* a, + /* Integer */ ae_vector* c, + ae_int_t n, + ae_int_t nc, + ae_int_t kmax, + ae_int_t* info, + /* Real */ ae_vector* thresholds, + ae_int_t* ni, + double* cve, + ae_state *_state) { - decisionforest *p = (decisionforest*)_p; - ae_touch_ptr((void*)p); - ae_vector_clear(&p->trees); -} - - -void _decisionforest_destroy(void* _p) -{ - decisionforest *p = (decisionforest*)_p; - ae_touch_ptr((void*)p); - ae_vector_destroy(&p->trees); + ae_frame _frame_block; + ae_vector _a; + ae_vector _c; + ae_int_t i; + ae_int_t j; + ae_int_t s; + ae_int_t jl; + ae_int_t jr; + double v2; + ae_vector ties; + ae_int_t tiecount; + ae_vector p1; + ae_vector p2; + double cvtemp; + ae_vector cnt; + ae_vector cnt2; + ae_matrix cv; + ae_matrix splits; + ae_int_t k; + ae_int_t koptimal; + double cvoptimal; + + ae_frame_make(_state, &_frame_block); + memset(&_a, 0, sizeof(_a)); + memset(&_c, 0, sizeof(_c)); + memset(&ties, 0, sizeof(ties)); + memset(&p1, 0, sizeof(p1)); + memset(&p2, 0, sizeof(p2)); + memset(&cnt, 0, sizeof(cnt)); + memset(&cnt2, 0, sizeof(cnt2)); + memset(&cv, 0, sizeof(cv)); + memset(&splits, 0, sizeof(splits)); + ae_vector_init_copy(&_a, a, _state, ae_true); + a = &_a; + ae_vector_init_copy(&_c, c, _state, ae_true); + c = &_c; + *info = 0; + ae_vector_clear(thresholds); + *ni = 0; + *cve = 0; + ae_vector_init(&ties, 0, DT_INT, _state, ae_true); + ae_vector_init(&p1, 0, DT_INT, _state, ae_true); + ae_vector_init(&p2, 0, DT_INT, _state, ae_true); + ae_vector_init(&cnt, 0, DT_INT, _state, ae_true); + ae_vector_init(&cnt2, 0, DT_INT, _state, ae_true); + ae_matrix_init(&cv, 0, 0, DT_REAL, _state, ae_true); + ae_matrix_init(&splits, 0, 0, DT_INT, _state, ae_true); + + + /* + * Test for errors in inputs + */ + if( (n<=0||nc<2)||kmax<2 ) + { + *info = -1; + ae_frame_leave(_state); + return; + } + for(i=0; i<=n-1; i++) + { + if( c->ptr.p_int[i]<0||c->ptr.p_int[i]>=nc ) + { + *info = -2; + ae_frame_leave(_state); + return; + } + } + *info = 1; + + /* + * Tie + */ + dstie(a, n, &ties, &tiecount, &p1, &p2, _state); + for(i=0; i<=n-1; i++) + { + if( p2.ptr.p_int[i]!=i ) + { + k = c->ptr.p_int[i]; + c->ptr.p_int[i] = c->ptr.p_int[p2.ptr.p_int[i]]; + c->ptr.p_int[p2.ptr.p_int[i]] = k; + } + } + + /* + * Special cases + */ + if( tiecount==1 ) + { + *info = -3; + ae_frame_leave(_state); + return; + } + + /* + * General case + * Use dynamic programming to find best split in O(KMax*NC*TieCount^2) time + */ + kmax = ae_minint(kmax, tiecount, _state); + ae_matrix_set_length(&cv, kmax-1+1, tiecount-1+1, _state); + ae_matrix_set_length(&splits, kmax-1+1, tiecount-1+1, _state); + ae_vector_set_length(&cnt, nc-1+1, _state); + ae_vector_set_length(&cnt2, nc-1+1, _state); + for(j=0; j<=nc-1; j++) + { + cnt.ptr.p_int[j] = 0; + } + for(j=0; j<=tiecount-1; j++) + { + bdss_tieaddc(c, &ties, j, nc, &cnt, _state); + splits.ptr.pp_int[0][j] = 0; + cv.ptr.pp_double[0][j] = bdss_getcv(&cnt, nc, _state); + } + for(k=1; k<=kmax-1; k++) + { + for(j=0; j<=nc-1; j++) + { + cnt.ptr.p_int[j] = 0; + } + + /* + * Subtask size J in [K..TieCount-1]: + * optimal K-splitting on ties from 0-th to J-th. 
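 * (Editor's note, added for clarity.) CV[K][J] holds the best cross-entropy
 * found for splitting ties 0..J into K+1 intervals, and Splits[K][J] records
 * the first tie of the last interval of that partition. For every candidate
 * split point S the loop below evaluates
 *     CV[K-1][S-1] + CVE(ties S..J),
 * where CVE(ties S..J) is computed by BDSS_GetCV from the class counts held
 * in Cnt2, and keeps the smallest value.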
+ */ + for(j=k; j<=tiecount-1; j++) + { + + /* + * Update Cnt - let it contain classes of ties from K-th to J-th + */ + bdss_tieaddc(c, &ties, j, nc, &cnt, _state); + + /* + * Search for optimal split point S in [K..J] + */ + for(i=0; i<=nc-1; i++) + { + cnt2.ptr.p_int[i] = cnt.ptr.p_int[i]; + } + cv.ptr.pp_double[k][j] = cv.ptr.pp_double[k-1][j-1]+bdss_getcv(&cnt2, nc, _state); + splits.ptr.pp_int[k][j] = j; + for(s=k+1; s<=j; s++) + { + + /* + * Update Cnt2 - let it contain classes of ties from S-th to J-th + */ + bdss_tiesubc(c, &ties, s-1, nc, &cnt2, _state); + + /* + * Calculate CVE + */ + cvtemp = cv.ptr.pp_double[k-1][s-1]+bdss_getcv(&cnt2, nc, _state); + if( ae_fp_less(cvtemp,cv.ptr.pp_double[k][j]) ) + { + cv.ptr.pp_double[k][j] = cvtemp; + splits.ptr.pp_int[k][j] = s; + } + } + } + } + + /* + * Choose best partition, output result + */ + koptimal = -1; + cvoptimal = ae_maxrealnumber; + for(k=0; k<=kmax-1; k++) + { + if( ae_fp_less(cv.ptr.pp_double[k][tiecount-1],cvoptimal) ) + { + cvoptimal = cv.ptr.pp_double[k][tiecount-1]; + koptimal = k; + } + } + ae_assert(koptimal>=0, "DSOptimalSplitK: internal error #1!", _state); + if( koptimal==0 ) + { + + /* + * Special case: best partition is one big interval. + * Even 2-partition is not better. + * This is possible when dealing with "weak" predictor variables. + * + * Make binary split as close to the median as possible. + */ + v2 = ae_maxrealnumber; + j = -1; + for(i=1; i<=tiecount-1; i++) + { + if( ae_fp_less(ae_fabs(ties.ptr.p_int[i]-0.5*(n-1), _state),v2) ) + { + v2 = ae_fabs(ties.ptr.p_int[i]-0.5*(n-1), _state); + j = i; + } + } + ae_assert(j>0, "DSOptimalSplitK: internal error #2!", _state); + ae_vector_set_length(thresholds, 0+1, _state); + thresholds->ptr.p_double[0] = 0.5*(a->ptr.p_double[ties.ptr.p_int[j-1]]+a->ptr.p_double[ties.ptr.p_int[j]]); + *ni = 2; + *cve = (double)(0); + for(i=0; i<=nc-1; i++) + { + cnt.ptr.p_int[i] = 0; + } + for(i=0; i<=j-1; i++) + { + bdss_tieaddc(c, &ties, i, nc, &cnt, _state); + } + *cve = *cve+bdss_getcv(&cnt, nc, _state); + for(i=0; i<=nc-1; i++) + { + cnt.ptr.p_int[i] = 0; + } + for(i=j; i<=tiecount-1; i++) + { + bdss_tieaddc(c, &ties, i, nc, &cnt, _state); + } + *cve = *cve+bdss_getcv(&cnt, nc, _state); + } + else + { + + /* + * General case: 2 or more intervals + * + * NOTE: we initialize both JL and JR (left and right bounds), + * altough algorithm needs only JL. 
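 * (Editor's note, added for clarity.) The loop below walks the Splits table
 * backwards: JL is the first tie of the current interval, so Thresholds[K-1]
 * is placed halfway between A[Ties[JL-1]] (the value of the last tie of the
 * previous interval) and A[Ties[JL]] (the first value of the current one);
 * JL is then replaced by Splits[K-1][JL-1], the start of the previous
 * interval.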
+ */ + ae_vector_set_length(thresholds, koptimal-1+1, _state); + *ni = koptimal+1; + *cve = cv.ptr.pp_double[koptimal][tiecount-1]; + jl = splits.ptr.pp_int[koptimal][tiecount-1]; + jr = tiecount-1; + for(k=koptimal; k>=1; k--) + { + thresholds->ptr.p_double[k-1] = 0.5*(a->ptr.p_double[ties.ptr.p_int[jl-1]]+a->ptr.p_double[ties.ptr.p_int[jl]]); + jr = jl-1; + jl = splits.ptr.pp_int[k-1][jl-1]; + } + touchint(&jr, _state); + } + ae_frame_leave(_state); } -void _dfreport_init(void* _p, ae_state *_state) +/************************************************************************* +Internal function +*************************************************************************/ +static double bdss_xlny(double x, double y, ae_state *_state) { - dfreport *p = (dfreport*)_p; - ae_touch_ptr((void*)p); + double result; + + + if( ae_fp_eq(x,(double)(0)) ) + { + result = (double)(0); + } + else + { + result = x*ae_log(y, _state); + } + return result; } -void _dfreport_init_copy(void* _dst, void* _src, ae_state *_state) +/************************************************************************* +Internal function, +returns number of samples of class I in Cnt[I] +*************************************************************************/ +static double bdss_getcv(/* Integer */ ae_vector* cnt, + ae_int_t nc, + ae_state *_state) { - dfreport *dst = (dfreport*)_dst; - dfreport *src = (dfreport*)_src; - dst->relclserror = src->relclserror; - dst->avgce = src->avgce; - dst->rmserror = src->rmserror; - dst->avgerror = src->avgerror; - dst->avgrelerror = src->avgrelerror; - dst->oobrelclserror = src->oobrelclserror; - dst->oobavgce = src->oobavgce; - dst->oobrmserror = src->oobrmserror; - dst->oobavgerror = src->oobavgerror; - dst->oobavgrelerror = src->oobavgrelerror; + ae_int_t i; + double s; + double result; + + + s = (double)(0); + for(i=0; i<=nc-1; i++) + { + s = s+cnt->ptr.p_int[i]; + } + result = (double)(0); + for(i=0; i<=nc-1; i++) + { + result = result-bdss_xlny((double)(cnt->ptr.p_int[i]), cnt->ptr.p_int[i]/(s+nc-1), _state); + } + return result; } -void _dfreport_clear(void* _p) +/************************************************************************* +Internal function, adds number of samples of class I in tie NTie to Cnt[I] +*************************************************************************/ +static void bdss_tieaddc(/* Integer */ ae_vector* c, + /* Integer */ ae_vector* ties, + ae_int_t ntie, + ae_int_t nc, + /* Integer */ ae_vector* cnt, + ae_state *_state) { - dfreport *p = (dfreport*)_p; - ae_touch_ptr((void*)p); + ae_int_t i; + + + for(i=ties->ptr.p_int[ntie]; i<=ties->ptr.p_int[ntie+1]-1; i++) + { + cnt->ptr.p_int[c->ptr.p_int[i]] = cnt->ptr.p_int[c->ptr.p_int[i]]+1; + } } -void _dfreport_destroy(void* _p) +/************************************************************************* +Internal function, subtracts number of samples of class I in tie NTie to Cnt[I] +*************************************************************************/ +static void bdss_tiesubc(/* Integer */ ae_vector* c, + /* Integer */ ae_vector* ties, + ae_int_t ntie, + ae_int_t nc, + /* Integer */ ae_vector* cnt, + ae_state *_state) { - dfreport *p = (dfreport*)_p; - ae_touch_ptr((void*)p); + ae_int_t i; + + + for(i=ties->ptr.p_int[ntie]; i<=ties->ptr.p_int[ntie+1]-1; i++) + { + cnt->ptr.p_int[c->ptr.p_int[i]] = cnt->ptr.p_int[c->ptr.p_int[i]]-1; + } } -void _dfinternalbuffers_init(void* _p, ae_state *_state) +void _cvreport_init(void* _p, ae_state *_state, ae_bool make_automatic) { - dfinternalbuffers *p = 
(dfinternalbuffers*)_p; + cvreport *p = (cvreport*)_p; ae_touch_ptr((void*)p); - ae_vector_init(&p->treebuf, 0, DT_REAL, _state); - ae_vector_init(&p->idxbuf, 0, DT_INT, _state); - ae_vector_init(&p->tmpbufr, 0, DT_REAL, _state); - ae_vector_init(&p->tmpbufr2, 0, DT_REAL, _state); - ae_vector_init(&p->tmpbufi, 0, DT_INT, _state); - ae_vector_init(&p->classibuf, 0, DT_INT, _state); - ae_vector_init(&p->sortrbuf, 0, DT_REAL, _state); - ae_vector_init(&p->sortrbuf2, 0, DT_REAL, _state); - ae_vector_init(&p->sortibuf, 0, DT_INT, _state); - ae_vector_init(&p->varpool, 0, DT_INT, _state); - ae_vector_init(&p->evsbin, 0, DT_BOOL, _state); - ae_vector_init(&p->evssplits, 0, DT_REAL, _state); } -void _dfinternalbuffers_init_copy(void* _dst, void* _src, ae_state *_state) +void _cvreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) { - dfinternalbuffers *dst = (dfinternalbuffers*)_dst; - dfinternalbuffers *src = (dfinternalbuffers*)_src; - ae_vector_init_copy(&dst->treebuf, &src->treebuf, _state); - ae_vector_init_copy(&dst->idxbuf, &src->idxbuf, _state); - ae_vector_init_copy(&dst->tmpbufr, &src->tmpbufr, _state); - ae_vector_init_copy(&dst->tmpbufr2, &src->tmpbufr2, _state); - ae_vector_init_copy(&dst->tmpbufi, &src->tmpbufi, _state); - ae_vector_init_copy(&dst->classibuf, &src->classibuf, _state); - ae_vector_init_copy(&dst->sortrbuf, &src->sortrbuf, _state); - ae_vector_init_copy(&dst->sortrbuf2, &src->sortrbuf2, _state); - ae_vector_init_copy(&dst->sortibuf, &src->sortibuf, _state); - ae_vector_init_copy(&dst->varpool, &src->varpool, _state); - ae_vector_init_copy(&dst->evsbin, &src->evsbin, _state); - ae_vector_init_copy(&dst->evssplits, &src->evssplits, _state); + cvreport *dst = (cvreport*)_dst; + cvreport *src = (cvreport*)_src; + dst->relclserror = src->relclserror; + dst->avgce = src->avgce; + dst->rmserror = src->rmserror; + dst->avgerror = src->avgerror; + dst->avgrelerror = src->avgrelerror; } -void _dfinternalbuffers_clear(void* _p) +void _cvreport_clear(void* _p) { - dfinternalbuffers *p = (dfinternalbuffers*)_p; + cvreport *p = (cvreport*)_p; ae_touch_ptr((void*)p); - ae_vector_clear(&p->treebuf); - ae_vector_clear(&p->idxbuf); - ae_vector_clear(&p->tmpbufr); - ae_vector_clear(&p->tmpbufr2); - ae_vector_clear(&p->tmpbufi); - ae_vector_clear(&p->classibuf); - ae_vector_clear(&p->sortrbuf); - ae_vector_clear(&p->sortrbuf2); - ae_vector_clear(&p->sortibuf); - ae_vector_clear(&p->varpool); - ae_vector_clear(&p->evsbin); - ae_vector_clear(&p->evssplits); } -void _dfinternalbuffers_destroy(void* _p) +void _cvreport_destroy(void* _p) { - dfinternalbuffers *p = (dfinternalbuffers*)_p; + cvreport *p = (cvreport*)_p; ae_touch_ptr((void*)p); - ae_vector_destroy(&p->treebuf); - ae_vector_destroy(&p->idxbuf); - ae_vector_destroy(&p->tmpbufr); - ae_vector_destroy(&p->tmpbufr2); - ae_vector_destroy(&p->tmpbufi); - ae_vector_destroy(&p->classibuf); - ae_vector_destroy(&p->sortrbuf); - ae_vector_destroy(&p->sortrbuf2); - ae_vector_destroy(&p->sortibuf); - ae_vector_destroy(&p->varpool); - ae_vector_destroy(&p->evsbin); - ae_vector_destroy(&p->evssplits); } +#endif +#if defined(AE_COMPILE_MLPBASE) || !defined(AE_PARTIAL_BUILD) /************************************************************************* -Linear regression - -Subroutine builds model: - - Y = A(0)*X[0] + ... 
+ A(N-1)*X[N-1] + A(N) - -and model found in ALGLIB format, covariation matrix, training set errors -(rms, average, average relative) and leave-one-out cross-validation -estimate of the generalization error. CV estimate calculated using fast -algorithm with O(NPoints*NVars) complexity. - -When covariation matrix is calculated standard deviations of function -values are assumed to be equal to RMS error on the training set. - -INPUT PARAMETERS: - XY - training set, array [0..NPoints-1,0..NVars]: - * NVars columns - independent variables - * last column - dependent variable - NPoints - training set size, NPoints>NVars+1 - NVars - number of independent variables - -OUTPUT PARAMETERS: - Info - return code: - * -255, in case of unknown internal error - * -4, if internal SVD subroutine haven't converged - * -1, if incorrect parameters was passed (NPointsrmserror, _state)*npoints/(npoints-nvars-1); - for(i=0; i<=nvars; i++) - { - ae_v_muld(&ar->c.ptr.pp_double[i][0], 1, ae_v_len(0,nvars), sigma2); - } - ae_frame_leave(_state); + result = mlpbase_gradbasecasecost; + return result; } /************************************************************************* -Linear regression +This function returns number of elements in subset of dataset which is +required for gradient calculation problem to be splitted. +*************************************************************************/ +ae_int_t mlpgradsplitsize(ae_state *_state) +{ + ae_int_t result; -Variant of LRBuild which uses vector of standatd deviations (errors in -function values). -INPUT PARAMETERS: - XY - training set, array [0..NPoints-1,0..NVars]: - * NVars columns - independent variables - * last column - dependent variable - S - standard deviations (errors in function values) - array[0..NPoints-1], S[i]>0. - NPoints - training set size, NPoints>NVars+1 - NVars - number of independent variables + result = mlpbase_microbatchsize; + return result; +} -OUTPUT PARAMETERS: - Info - return code: - * -255, in case of unknown internal error - * -4, if internal SVD subroutine haven't converged - * -1, if incorrect parameters was passed (NPoints