diff --git a/README.md b/README.md index adda337..2accee0 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,7 @@ This project is a collection of TypeScript machine learning helpers and utilitie + ``` ███ ███ ███████ ███ ███ ██ diff --git a/dist/index.d.ts b/dist/index.d.ts index 197e255..5fb38de 100644 --- a/dist/index.d.ts +++ b/dist/index.d.ts @@ -22,7 +22,7 @@ declare module 'mz-ml' { private static initZeroArray; private shuffle; private gradientDescent; - train(): (number | number[])[]; + fit(): (number | number[])[]; predict(features: number[]): number; rSquared(): number; meanSquaredError(): number; diff --git a/dist/mz-ml.esm.js b/dist/mz-ml.esm.js index a0c7e6e..da7c296 100644 --- a/dist/mz-ml.esm.js +++ b/dist/mz-ml.esm.js @@ -4,5 +4,5 @@ A collection of TypeScript-based ML helpers. https://github.com/mzusin/mz-ml Copyright (c) 2023-present, Miriam Zusin */ -var m=Object.defineProperty;var c=Math.pow,d=(f,e,t)=>e in f?m(f,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):f[e]=t;var l=(f,e,t)=>(d(f,typeof e!="symbol"?e+"":e,t),t);var g=class{constructor(e){l(this,"options");l(this,"weights");l(this,"bias");l(this,"features");l(this,"labels");l(this,"n");l(this,"batchSize");l(this,"pearson",()=>{if(this.features.length<=0||this.labels.length<=0)return[];let e=[],t=this.labels.reduce((s,i)=>s+i,0)/this.labels.length;for(let s=0;sr[s]),o=n.reduce((r,u)=>r+u,0)/n.length;for(let r=0;r0?this.features[0].length:0,this.weights=g.initZeroArray(this.n),this.weights.length=this.n,this.weights.fill(0),this.bias=0,this.batchSize=(t=this.options.batchSize)!=null?t:this.features.length}static initZeroArray(e){let t=[];return t.length=e,t.fill(0),t}shuffle(){let e=[];for(let t=0;t0;t--){let s=Math.floor(Math.random()*(t+1));[e[t],e[s]]=[e[s],e[t]]}for(let t=this.features.length-1;t>0;t--)[this.features[t],this.features[e[t]]]=[this.features[e[t]],this.features[t]],[this.labels[t],this.labels[e[t]]]=[this.labels[e[t]],this.labels[t]]}gradientDescent(e,t){let s=g.initZeroArray(this.n),i=0;for(let n=0;ni+h)/this.labels.length;for(let i=0;ie in f?m(f,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):f[e]=t;var l=(f,e,t)=>(d(f,typeof e!="symbol"?e+"":e,t),t);var g=class{constructor(e){l(this,"options");l(this,"weights");l(this,"bias");l(this,"features");l(this,"labels");l(this,"n");l(this,"batchSize");l(this,"pearson",()=>{if(this.features.length<=0||this.labels.length<=0)return[];let e=[],t=this.labels.reduce((s,i)=>s+i,0)/this.labels.length;for(let s=0;sr[s]),o=n.reduce((r,u)=>r+u,0)/n.length;for(let r=0;r0?this.features[0].length:0,this.weights=g.initZeroArray(this.n),this.weights.length=this.n,this.weights.fill(0),this.bias=0,this.batchSize=(t=this.options.batchSize)!=null?t:this.features.length}static initZeroArray(e){let t=[];return t.length=e,t.fill(0),t}shuffle(){let e=[];for(let t=0;t0;t--){let s=Math.floor(Math.random()*(t+1));[e[t],e[s]]=[e[s],e[t]]}for(let t=this.features.length-1;t>0;t--)[this.features[t],this.features[e[t]]]=[this.features[e[t]],this.features[t]],[this.labels[t],this.labels[e[t]]]=[this.labels[e[t]],this.labels[t]]}gradientDescent(e,t){let s=g.initZeroArray(this.n),i=0;for(let n=0;ni+h)/this.labels.length;for(let i=0;i 0 ? this.features[0].length : 0;\n\n // Initialize weights to zero\n this.weights = LinearRegression.initZeroArray(this.n);\n this.weights.length = this.n;\n this.weights.fill(0);\n\n this.bias = 0;\n\n this.batchSize = this.options.batchSize ?? 
this.features.length;\n }\n\n private static initZeroArray(len: number) {\n const arr: number[] = [];\n arr.length = len;\n arr.fill(0);\n return arr;\n }\n\n private shuffle() {\n const indices: number[] = [];\n for(let i=0; i 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [indices[i], indices[j]] = [indices[j], indices[i]];\n }\n\n for (let i = this.features.length - 1; i > 0; i--) {\n [this.features[i], this.features[indices[i]]] = [this.features[indices[i]], this.features[i]];\n [this.labels[i], this.labels[indices[i]]] = [this.labels[indices[i]], this.labels[i]];\n }\n }\n\n private gradientDescent(batchFeatures: number[][], batchLabels: number[]) : [ number[], number ] {\n\n const mGradientSums = LinearRegression.initZeroArray(this.n);\n let bGradientSum = 0;\n\n for (let i = 0; i < batchFeatures.length; i++) {\n\n const _features: number[] = batchFeatures[i];\n\n const actualValue = batchLabels[i];\n const predictedValue = this.predict(_features);\n const diff = actualValue - predictedValue;\n\n // dE/dm = (-2/n) * sum_from_0_to_n(x * (actual_value - (mx + b)))\n for (let j = 0; j < this.n; j++) {\n mGradientSums[j] += -2 * _features[j] * diff;\n }\n\n // dE/db = (-2/n) * sum_from_0_to_n(actual_value - (mx + b))\n bGradientSum += -2 * diff;\n }\n\n // Update weights and bias using learning rate\n const newWeights = [];\n\n for(let i=0; i sum + x) / this.labels.length; // yMean\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n residualSumOfSquares += (actualValue - predictedValue) ** 2;\n totalSumOfSquares += (actualValue - meanOfActualValues) ** 2;\n }\n\n return 1 - (residualSumOfSquares / totalSumOfSquares);\n }\n\n /**\n * MSE = (1/n) * sum_from_0_to_n((actual_value - (mx + b))^2)\n * The ideal value of Mean Squared Error (MSE) is 0.\n * Achieving an MSE of 0 would mean that the model perfectly predicts the target variable\n * for every data point in the training set. However, it's important to note\n * that achieving an MSE of exactly 0 is extremely rare and often unrealistic, especially with real-world data.\n */\n meanSquaredError() {\n if(this.features.length <= 0) return 0;\n\n let mse = 0;\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n mse += (actualValue - predictedValue) ** 2;\n }\n\n mse /= this.features.length;\n\n return mse;\n }\n\n /**\n * Compute the Pearson correlation coefficient.\n * --------------------------------------------\n * It is a statistical measure that quantifies the strength and direction of the linear relationship\n * between two variables. 
It's commonly used to assess the strength of association\n * between two continuous variables.\n *\n * Range [-1, 1]\n * r=1 indicates a perfect positive linear relationship,\n * meaning that as one variable increases, the other variable increases proportionally.\n *\n * r=\u22121 indicates a perfect negative linear relationship, meaning that as one variable increases,\n * the other variable decreases proportionally.\n *\n * r= 0 indicates no linear relationship between the variables.\n */\n pearson = () : number[] => {\n if (this.features.length <= 0 || this.labels.length <= 0) return [];\n\n const pearsonCoefficients: number[] = [];\n const yMean = this.labels.reduce((sum, y) => sum + y, 0) / this.labels.length;\n\n for (let featureIndex = 0; featureIndex < this.n; featureIndex++) {\n let sumXY = 0; // Sum of the product of (x - xMean) and (y - yMean)\n let sumX2 = 0; // Sum of squared differences between x and xMean\n let sumY2 = 0; // Sum of squared differences between y and yMean\n\n const xValues = this.features.map(feature => feature[featureIndex]);\n const xMean = xValues.reduce((sum, x) => sum + x, 0) / xValues.length;\n\n for (let i = 0; i < this.features.length; i++) {\n const x = this.features[i][featureIndex];\n const y = this.labels[i];\n\n sumXY += (x - xMean) * (y - yMean);\n sumX2 += (x - xMean) ** 2;\n sumY2 += (y - yMean) ** 2;\n }\n\n pearsonCoefficients.push((sumX2 === 0 || sumY2 === 0) ? 0 : (sumXY / Math.sqrt(sumX2 * sumY2)));\n }\n\n return pearsonCoefficients;\n }\n\n}"], - "mappings": ";;;;;;mLAwCO,IAAMA,EAAN,KAAuB,CAY1B,YAAYC,EAAmC,CAV/CC,EAAA,gBACAA,EAAA,gBACAA,EAAA,aAEAA,EAAA,iBACAA,EAAA,eACAA,EAAA,UAEAA,EAAA,kBAyMAA,EAAA,eAAU,IAAiB,CACvB,GAAI,KAAK,SAAS,QAAU,GAAK,KAAK,OAAO,QAAU,EAAG,MAAO,CAAC,EAElE,IAAMC,EAAgC,CAAC,EACjCC,EAAQ,KAAK,OAAO,OAAO,CAACC,EAAKC,IAAMD,EAAMC,EAAG,CAAC,EAAI,KAAK,OAAO,OAEvE,QAASC,EAAe,EAAGA,EAAe,KAAK,EAAGA,IAAgB,CAC9D,IAAIC,EAAQ,EACRC,EAAQ,EACRC,EAAQ,EAENC,EAAU,KAAK,SAAS,IAAIC,GAAWA,EAAQL,CAAY,CAAC,EAC5DM,EAAQF,EAAQ,OAAO,CAACN,EAAKS,IAAMT,EAAMS,EAAG,CAAC,EAAIH,EAAQ,OAE/D,QAASI,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMD,EAAI,KAAK,SAASC,CAAC,EAAER,CAAY,EACjCD,EAAI,KAAK,OAAOS,CAAC,EAEvBP,IAAUM,EAAID,IAAUP,EAAIF,GAC5BK,GAAUO,EAAAF,EAAID,EAAU,GACxBH,GAAUM,EAAAV,EAAIF,EAAU,EAC5B,CAEAD,EAAoB,KAAMM,IAAU,GAAKC,IAAU,EAAK,EAAKF,EAAQ,KAAK,KAAKC,EAAQC,CAAK,CAAE,CAClG,CAEA,OAAOP,CACX,GAtRJ,IAAAc,EAqDQ,KAAK,QAAUhB,EAEf,KAAK,SAAW,CAAC,GAAG,KAAK,QAAQ,QAAQ,EACzC,KAAK,OAAS,CAAC,GAAG,KAAK,QAAQ,MAAM,EACrC,KAAK,EAAI,KAAK,SAAS,OAAS,EAAI,KAAK,SAAS,CAAC,EAAE,OAAS,EAG9D,KAAK,QAAUD,EAAiB,cAAc,KAAK,CAAC,EACpD,KAAK,QAAQ,OAAS,KAAK,EAC3B,KAAK,QAAQ,KAAK,CAAC,EAEnB,KAAK,KAAO,EAEZ,KAAK,WAAYiB,EAAA,KAAK,QAAQ,YAAb,KAAAA,EAA0B,KAAK,SAAS,MAC7D,CAEA,OAAe,cAAcC,EAAa,CACtC,IAAMC,EAAgB,CAAC,EACvB,OAAAA,EAAI,OAASD,EACbC,EAAI,KAAK,CAAC,EACHA,CACX,CAEQ,SAAU,CACd,IAAMC,EAAoB,CAAC,EAC3B,QAAQL,EAAE,EAAGA,EAAE,KAAK,EAAGA,IACnBK,EAAQ,KAAKL,CAAC,EAGlB,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAAK,CAC/C,IAAMM,EAAI,KAAK,MAAM,KAAK,OAAO,GAAKN,EAAI,EAAE,EAC5C,CAACK,EAAQL,CAAC,EAAGK,EAAQC,CAAC,CAAC,EAAI,CAACD,EAAQC,CAAC,EAAGD,EAAQL,CAAC,CAAC,CACtD,CAEA,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAC1C,CAAC,KAAK,SAASA,CAAC,EAAG,KAAK,SAASK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,SAASK,EAAQL,CAAC,CAAC,EAAG,KAAK,SAASA,CAAC,CAAC,EAC5F,CAAC,KAAK,OAAOA,CAAC,EAAG,KAAK,OAAOK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,OAAOK,EAAQL,CAAC,CAAC,EAAG,KAAK,OAAOA,CAAC,CAAC,CAE5F,CAEQ,gBAAgBO,EAA2BC,EAA8C,CAE7F,IAAMC,EAAgBxB,EAAiB,cAAc,KAAK,CAAC,EACvDyB,EAAe,EAEnB,QAASV,EAAI,EAAGA,EAAIO,EAAc,OAAQP,IAAK,CAE3C,IAAMW,EAAs
BJ,EAAcP,CAAC,EAErCY,EAAcJ,EAAYR,CAAC,EAC3Ba,EAAiB,KAAK,QAAQF,CAAS,EACvCG,EAAOF,EAAcC,EAG3B,QAASP,EAAI,EAAGA,EAAI,KAAK,EAAGA,IACxBG,EAAcH,CAAC,GAAK,GAAKK,EAAUL,CAAC,EAAIQ,EAI5CJ,GAAgB,GAAKI,CACzB,CAGA,IAAMC,EAAa,CAAC,EAEpB,QAAQf,EAAE,EAAGA,EAAE,KAAK,QAAQ,OAAQA,IAAK,CAIrC,IAAMgB,EAHU,KAAK,QAAQhB,CAAC,EAGD,KAAK,QAAQ,aAAe,KAAK,UAAaS,EAAcT,CAAC,EAC1Fe,EAAW,KAAKC,CAAS,CAC7B,CAGA,IAAMC,EAAU,KAAK,KAAQ,KAAK,QAAQ,aAAe,KAAK,UAAaP,EAE3E,MAAO,CAACK,EAAYE,CAAO,CAC/B,CAEA,OAAQ,CACJ,QAAQjB,EAAI,EAAGA,EAAI,KAAK,QAAQ,OAAQA,IAAK,CAErC,KAAK,QAAQ,SACb,KAAK,QAAQ,EAIjB,QAASM,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,GAAK,KAAK,UAAW,CAE3D,IAAMC,EAAgB,KAAK,SAAS,MAAMD,EAAGA,EAAI,KAAK,SAAS,EACzDE,EAAc,KAAK,OAAO,MAAMF,EAAGA,EAAI,KAAK,SAAS,EAErD,CAACS,EAAYE,CAAO,EAAI,KAAK,gBAAgBV,EAAeC,CAAW,EAEzE,OAAO,KAAK,QAAQ,gBAAmB,YACvC,KAAK,QAAQ,eAAeR,EAAG,KAAK,QAAQ,OAAQe,EAAYE,CAAO,EAG3E,KAAK,QAAUF,EACf,KAAK,KAAOE,CAChB,CACJ,CAEA,MAAO,CAAC,KAAK,QAAS,KAAK,IAAI,CACnC,CAKA,QAAQC,EAAoB,CAExB,GAAIA,EAAS,SAAW,KAAK,QAAQ,OACjC,MAAM,IAAI,MAAM,0DAA0D,EAK9E,IAAIC,EAAa,KAAK,KAEtB,QAASnB,EAAI,EAAGA,EAAIkB,EAAS,OAAQlB,IACjCmB,GAAcD,EAASlB,CAAC,EAAI,KAAK,QAAQA,CAAC,EAG9C,OAAOmB,CACX,CAgBA,UAAW,CACP,IAAIC,EAAuB,EACvBC,EAAoB,EAElBC,EAAqB,KAAK,OAAO,QAAU,EAAI,EACjD,KAAK,OAAO,OAAO,CAAChC,EAAKS,IAAMT,EAAMS,CAAC,EAAI,KAAK,OAAO,OAE1D,QAAS,EAAI,EAAG,EAAI,KAAK,SAAS,OAAQ,IAAK,CAC3C,IAAMa,EAAc,KAAK,OAAO,CAAC,EAC3BC,EAAiB,KAAK,QAAQ,KAAK,SAAS,CAAC,CAAC,EAEpDO,GAAyBnB,EAAAW,EAAcC,EAAmB,GAC1DQ,GAAsBpB,EAAAW,EAAcU,EAAuB,EAC/D,CAEA,MAAO,GAAKF,EAAuBC,CACvC,CASA,kBAAmB,CACf,GAAG,KAAK,SAAS,QAAU,EAAG,MAAO,GAErC,IAAIE,EAAM,EAEV,QAASvB,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMY,EAAc,KAAK,OAAOZ,CAAC,EAC3Ba,EAAiB,KAAK,QAAQ,KAAK,SAASb,CAAC,CAAC,EAEpDuB,GAAQtB,EAAAW,EAAcC,EAAmB,EAC7C,CAEA,OAAAU,GAAO,KAAK,SAAS,OAEdA,CACX,CA+CJ", + "sourcesContent": ["import { ILinearRegressionOptions } from '../interfaces';\n\n/**\n * Linear Regression\n *\n * Mean Squared Error (MSE): Error function = Loss function\n * E = (1/n) * sum_from_0_to_n((actual_value - predicted_value)^2)\n * E = (1/n) * sum_from_0_to_n((actual_value - (mx + b))^2)\n * ---------------------------------------------------------\n * Goal: Minimize the error function - find (m, b) with the lowest possible E.\n * How:\n *\n * - Take partial derivative with respect m and also with respect b.\n * This helps to find the \"m\" that maximally increase E,\n * and \"b\" that maximally increase E (the steepest ascent).\n *\n * - After we found them, we get the opposite direction\n * to find the way to decrease E (the steepest descent).\n * ---------------------------------------------------------\n *\n * How to calculate partial derivative of \"m\"?\n * dE/dm = (1/n) * sum_from_0_to_n(2 * (actual_value - (mx + b)) * (-x))\n * dE/dm = (-2/n) * sum_from_0_to_n(x * (actual_value - (mx + b)))\n * ---------------------------------------------------------\n *\n * How to calculate partial derivative of \"b\"?\n * dE/db = (-2/n) * sum_from_0_to_n(actual_value - (mx + b))\n * ---------------------------------------------------------\n *\n * After the derivatives are found (the steepest ascent)\n * we need to find the steepest descent:\n *\n * new_m = current_m - learning_rate * dE/dm\n * new_b = current_b - learning_rate * dE/db\n *\n * General Form:\n * ------------\n * y = w1*x1 + w2*x2 + \u2026 + wn*xn + b\n * [w1, ..., wn] = weights, b = bias\n */\nexport class LinearRegression {\n\n options: ILinearRegressionOptions;\n weights: number[];\n bias: number;\n\n features: number[][];\n labels: number[];\n n: number;\n\n batchSize: 
number;\n\n constructor(options: ILinearRegressionOptions) {\n this.options = options;\n\n this.features = [...this.options.features];\n this.labels = [...this.options.labels];\n this.n = this.features.length > 0 ? this.features[0].length : 0;\n\n // Initialize weights to zero\n this.weights = LinearRegression.initZeroArray(this.n);\n this.weights.length = this.n;\n this.weights.fill(0);\n\n this.bias = 0;\n\n this.batchSize = this.options.batchSize ?? this.features.length;\n }\n\n private static initZeroArray(len: number) {\n const arr: number[] = [];\n arr.length = len;\n arr.fill(0);\n return arr;\n }\n\n private shuffle() {\n const indices: number[] = [];\n for(let i=0; i 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [indices[i], indices[j]] = [indices[j], indices[i]];\n }\n\n for (let i = this.features.length - 1; i > 0; i--) {\n [this.features[i], this.features[indices[i]]] = [this.features[indices[i]], this.features[i]];\n [this.labels[i], this.labels[indices[i]]] = [this.labels[indices[i]], this.labels[i]];\n }\n }\n\n private gradientDescent(batchFeatures: number[][], batchLabels: number[]) : [ number[], number ] {\n\n const mGradientSums = LinearRegression.initZeroArray(this.n);\n let bGradientSum = 0;\n\n for (let i = 0; i < batchFeatures.length; i++) {\n\n const _features: number[] = batchFeatures[i];\n\n const actualValue = batchLabels[i];\n const predictedValue = this.predict(_features);\n const diff = actualValue - predictedValue;\n\n // dE/dm = (-2/n) * sum_from_0_to_n(x * (actual_value - (mx + b)))\n for (let j = 0; j < this.n; j++) {\n mGradientSums[j] += -2 * _features[j] * diff;\n }\n\n // dE/db = (-2/n) * sum_from_0_to_n(actual_value - (mx + b))\n bGradientSum += -2 * diff;\n }\n\n // Update weights and bias using learning rate\n const newWeights = [];\n\n for(let i=0; i sum + x) / this.labels.length; // yMean\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n residualSumOfSquares += (actualValue - predictedValue) ** 2;\n totalSumOfSquares += (actualValue - meanOfActualValues) ** 2;\n }\n\n return 1 - (residualSumOfSquares / totalSumOfSquares);\n }\n\n /**\n * MSE = (1/n) * sum_from_0_to_n((actual_value - (mx + b))^2)\n * The ideal value of Mean Squared Error (MSE) is 0.\n * Achieving an MSE of 0 would mean that the model perfectly predicts the target variable\n * for every data point in the training set. However, it's important to note\n * that achieving an MSE of exactly 0 is extremely rare and often unrealistic, especially with real-world data.\n */\n meanSquaredError() {\n if(this.features.length <= 0) return 0;\n\n let mse = 0;\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n mse += (actualValue - predictedValue) ** 2;\n }\n\n mse /= this.features.length;\n\n return mse;\n }\n\n /**\n * Compute the Pearson correlation coefficient.\n * --------------------------------------------\n * It is a statistical measure that quantifies the strength and direction of the linear relationship\n * between two variables. 
It's commonly used to assess the strength of association\n * between two continuous variables.\n *\n * Range [-1, 1]\n * r=1 indicates a perfect positive linear relationship,\n * meaning that as one variable increases, the other variable increases proportionally.\n *\n * r=\u22121 indicates a perfect negative linear relationship, meaning that as one variable increases,\n * the other variable decreases proportionally.\n *\n * r= 0 indicates no linear relationship between the variables.\n */\n pearson = () : number[] => {\n if (this.features.length <= 0 || this.labels.length <= 0) return [];\n\n const pearsonCoefficients: number[] = [];\n const yMean = this.labels.reduce((sum, y) => sum + y, 0) / this.labels.length;\n\n for (let featureIndex = 0; featureIndex < this.n; featureIndex++) {\n let sumXY = 0; // Sum of the product of (x - xMean) and (y - yMean)\n let sumX2 = 0; // Sum of squared differences between x and xMean\n let sumY2 = 0; // Sum of squared differences between y and yMean\n\n const xValues = this.features.map(feature => feature[featureIndex]);\n const xMean = xValues.reduce((sum, x) => sum + x, 0) / xValues.length;\n\n for (let i = 0; i < this.features.length; i++) {\n const x = this.features[i][featureIndex];\n const y = this.labels[i];\n\n sumXY += (x - xMean) * (y - yMean);\n sumX2 += (x - xMean) ** 2;\n sumY2 += (y - yMean) ** 2;\n }\n\n pearsonCoefficients.push((sumX2 === 0 || sumY2 === 0) ? 0 : (sumXY / Math.sqrt(sumX2 * sumY2)));\n }\n\n return pearsonCoefficients;\n }\n\n}"], + "mappings": ";;;;;;mLAwCO,IAAMA,EAAN,KAAuB,CAY1B,YAAYC,EAAmC,CAV/CC,EAAA,gBACAA,EAAA,gBACAA,EAAA,aAEAA,EAAA,iBACAA,EAAA,eACAA,EAAA,UAEAA,EAAA,kBAyMAA,EAAA,eAAU,IAAiB,CACvB,GAAI,KAAK,SAAS,QAAU,GAAK,KAAK,OAAO,QAAU,EAAG,MAAO,CAAC,EAElE,IAAMC,EAAgC,CAAC,EACjCC,EAAQ,KAAK,OAAO,OAAO,CAACC,EAAKC,IAAMD,EAAMC,EAAG,CAAC,EAAI,KAAK,OAAO,OAEvE,QAASC,EAAe,EAAGA,EAAe,KAAK,EAAGA,IAAgB,CAC9D,IAAIC,EAAQ,EACRC,EAAQ,EACRC,EAAQ,EAENC,EAAU,KAAK,SAAS,IAAIC,GAAWA,EAAQL,CAAY,CAAC,EAC5DM,EAAQF,EAAQ,OAAO,CAACN,EAAKS,IAAMT,EAAMS,EAAG,CAAC,EAAIH,EAAQ,OAE/D,QAASI,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMD,EAAI,KAAK,SAASC,CAAC,EAAER,CAAY,EACjCD,EAAI,KAAK,OAAOS,CAAC,EAEvBP,IAAUM,EAAID,IAAUP,EAAIF,GAC5BK,GAAUO,EAAAF,EAAID,EAAU,GACxBH,GAAUM,EAAAV,EAAIF,EAAU,EAC5B,CAEAD,EAAoB,KAAMM,IAAU,GAAKC,IAAU,EAAK,EAAKF,EAAQ,KAAK,KAAKC,EAAQC,CAAK,CAAE,CAClG,CAEA,OAAOP,CACX,GAtRJ,IAAAc,EAqDQ,KAAK,QAAUhB,EAEf,KAAK,SAAW,CAAC,GAAG,KAAK,QAAQ,QAAQ,EACzC,KAAK,OAAS,CAAC,GAAG,KAAK,QAAQ,MAAM,EACrC,KAAK,EAAI,KAAK,SAAS,OAAS,EAAI,KAAK,SAAS,CAAC,EAAE,OAAS,EAG9D,KAAK,QAAUD,EAAiB,cAAc,KAAK,CAAC,EACpD,KAAK,QAAQ,OAAS,KAAK,EAC3B,KAAK,QAAQ,KAAK,CAAC,EAEnB,KAAK,KAAO,EAEZ,KAAK,WAAYiB,EAAA,KAAK,QAAQ,YAAb,KAAAA,EAA0B,KAAK,SAAS,MAC7D,CAEA,OAAe,cAAcC,EAAa,CACtC,IAAMC,EAAgB,CAAC,EACvB,OAAAA,EAAI,OAASD,EACbC,EAAI,KAAK,CAAC,EACHA,CACX,CAEQ,SAAU,CACd,IAAMC,EAAoB,CAAC,EAC3B,QAAQL,EAAE,EAAGA,EAAE,KAAK,EAAGA,IACnBK,EAAQ,KAAKL,CAAC,EAGlB,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAAK,CAC/C,IAAMM,EAAI,KAAK,MAAM,KAAK,OAAO,GAAKN,EAAI,EAAE,EAC5C,CAACK,EAAQL,CAAC,EAAGK,EAAQC,CAAC,CAAC,EAAI,CAACD,EAAQC,CAAC,EAAGD,EAAQL,CAAC,CAAC,CACtD,CAEA,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAC1C,CAAC,KAAK,SAASA,CAAC,EAAG,KAAK,SAASK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,SAASK,EAAQL,CAAC,CAAC,EAAG,KAAK,SAASA,CAAC,CAAC,EAC5F,CAAC,KAAK,OAAOA,CAAC,EAAG,KAAK,OAAOK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,OAAOK,EAAQL,CAAC,CAAC,EAAG,KAAK,OAAOA,CAAC,CAAC,CAE5F,CAEQ,gBAAgBO,EAA2BC,EAA8C,CAE7F,IAAMC,EAAgBxB,EAAiB,cAAc,KAAK,CAAC,EACvDyB,EAAe,EAEnB,QAASV,EAAI,EAAGA,EAAIO,EAAc,OAAQP,IAAK,CAE3C,IAAMW,EAAs
BJ,EAAcP,CAAC,EAErCY,EAAcJ,EAAYR,CAAC,EAC3Ba,EAAiB,KAAK,QAAQF,CAAS,EACvCG,EAAOF,EAAcC,EAG3B,QAASP,EAAI,EAAGA,EAAI,KAAK,EAAGA,IACxBG,EAAcH,CAAC,GAAK,GAAKK,EAAUL,CAAC,EAAIQ,EAI5CJ,GAAgB,GAAKI,CACzB,CAGA,IAAMC,EAAa,CAAC,EAEpB,QAAQf,EAAE,EAAGA,EAAE,KAAK,QAAQ,OAAQA,IAAK,CAIrC,IAAMgB,EAHU,KAAK,QAAQhB,CAAC,EAGD,KAAK,QAAQ,aAAe,KAAK,UAAaS,EAAcT,CAAC,EAC1Fe,EAAW,KAAKC,CAAS,CAC7B,CAGA,IAAMC,EAAU,KAAK,KAAQ,KAAK,QAAQ,aAAe,KAAK,UAAaP,EAE3E,MAAO,CAACK,EAAYE,CAAO,CAC/B,CAEA,KAAM,CACF,QAAQjB,EAAI,EAAGA,EAAI,KAAK,QAAQ,OAAQA,IAAK,CAErC,KAAK,QAAQ,SACb,KAAK,QAAQ,EAIjB,QAASM,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,GAAK,KAAK,UAAW,CAE3D,IAAMC,EAAgB,KAAK,SAAS,MAAMD,EAAGA,EAAI,KAAK,SAAS,EACzDE,EAAc,KAAK,OAAO,MAAMF,EAAGA,EAAI,KAAK,SAAS,EAErD,CAACS,EAAYE,CAAO,EAAI,KAAK,gBAAgBV,EAAeC,CAAW,EAEzE,OAAO,KAAK,QAAQ,gBAAmB,YACvC,KAAK,QAAQ,eAAeR,EAAG,KAAK,QAAQ,OAAQe,EAAYE,CAAO,EAG3E,KAAK,QAAUF,EACf,KAAK,KAAOE,CAChB,CACJ,CAEA,MAAO,CAAC,KAAK,QAAS,KAAK,IAAI,CACnC,CAKA,QAAQC,EAAoB,CAExB,GAAIA,EAAS,SAAW,KAAK,QAAQ,OACjC,MAAM,IAAI,MAAM,0DAA0D,EAK9E,IAAIC,EAAa,KAAK,KAEtB,QAASnB,EAAI,EAAGA,EAAIkB,EAAS,OAAQlB,IACjCmB,GAAcD,EAASlB,CAAC,EAAI,KAAK,QAAQA,CAAC,EAG9C,OAAOmB,CACX,CAgBA,UAAW,CACP,IAAIC,EAAuB,EACvBC,EAAoB,EAElBC,EAAqB,KAAK,OAAO,QAAU,EAAI,EACjD,KAAK,OAAO,OAAO,CAAChC,EAAKS,IAAMT,EAAMS,CAAC,EAAI,KAAK,OAAO,OAE1D,QAAS,EAAI,EAAG,EAAI,KAAK,SAAS,OAAQ,IAAK,CAC3C,IAAMa,EAAc,KAAK,OAAO,CAAC,EAC3BC,EAAiB,KAAK,QAAQ,KAAK,SAAS,CAAC,CAAC,EAEpDO,GAAyBnB,EAAAW,EAAcC,EAAmB,GAC1DQ,GAAsBpB,EAAAW,EAAcU,EAAuB,EAC/D,CAEA,MAAO,GAAKF,EAAuBC,CACvC,CASA,kBAAmB,CACf,GAAG,KAAK,SAAS,QAAU,EAAG,MAAO,GAErC,IAAIE,EAAM,EAEV,QAASvB,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMY,EAAc,KAAK,OAAOZ,CAAC,EAC3Ba,EAAiB,KAAK,QAAQ,KAAK,SAASb,CAAC,CAAC,EAEpDuB,GAAQtB,EAAAW,EAAcC,EAAmB,EAC7C,CAEA,OAAAU,GAAO,KAAK,SAAS,OAEdA,CACX,CA+CJ", "names": ["LinearRegression", "options", "__publicField", "pearsonCoefficients", "yMean", "sum", "y", "featureIndex", "sumXY", "sumX2", "sumY2", "xValues", "feature", "xMean", "x", "i", "__pow", "_a", "len", "arr", "indices", "j", "batchFeatures", "batchLabels", "mGradientSums", "bGradientSum", "_features", "actualValue", "predictedValue", "diff", "newWeights", "gradientM", "newBias", "features", "prediction", "residualSumOfSquares", "totalSumOfSquares", "meanOfActualValues", "mse"] } diff --git a/dist/mz-ml.min.js b/dist/mz-ml.min.js index 5e8d0a9..0f70555 100644 --- a/dist/mz-ml.min.js +++ b/dist/mz-ml.min.js @@ -4,5 +4,5 @@ A collection of TypeScript-based ML helpers. 
https://github.com/mzusin/mz-ml Copyright (c) 2023-present, Miriam Zusin */ -(()=>{var S=Object.defineProperty;var w=Object.getOwnPropertySymbols;var M=Object.prototype.hasOwnProperty,V=Object.prototype.propertyIsEnumerable;var f=Math.pow,m=(a,e,t)=>e in a?S(a,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):a[e]=t,z=(a,e)=>{for(var t in e||(e={}))M.call(e,t)&&m(a,t,e[t]);if(w)for(var t of w(e))V.call(e,t)&&m(a,t,e[t]);return a};var y=(a,e)=>{for(var t in e)S(a,t,{get:e[t],enumerable:!0})};var o=(a,e,t)=>(m(a,typeof e!="symbol"?e+"":e,t),t);var d={};y(d,{LinearRegression:()=>b});var b=class{constructor(e){o(this,"options");o(this,"weights");o(this,"bias");o(this,"features");o(this,"labels");o(this,"n");o(this,"batchSize");o(this,"pearson",()=>{if(this.features.length<=0||this.labels.length<=0)return[];let e=[],t=this.labels.reduce((s,i)=>s+i,0)/this.labels.length;for(let s=0;sr[s]),u=n.reduce((r,c)=>r+c,0)/n.length;for(let r=0;r0?this.features[0].length:0,this.weights=b.initZeroArray(this.n),this.weights.length=this.n,this.weights.fill(0),this.bias=0,this.batchSize=(t=this.options.batchSize)!=null?t:this.features.length}static initZeroArray(e){let t=[];return t.length=e,t.fill(0),t}shuffle(){let e=[];for(let t=0;t0;t--){let s=Math.floor(Math.random()*(t+1));[e[t],e[s]]=[e[s],e[t]]}for(let t=this.features.length-1;t>0;t--)[this.features[t],this.features[e[t]]]=[this.features[e[t]],this.features[t]],[this.labels[t],this.labels[e[t]]]=[this.labels[e[t]],this.labels[t]]}gradientDescent(e,t){let s=b.initZeroArray(this.n),i=0;for(let n=0;ni+h)/this.labels.length;for(let i=0;i{var S=Object.defineProperty;var w=Object.getOwnPropertySymbols;var M=Object.prototype.hasOwnProperty,V=Object.prototype.propertyIsEnumerable;var f=Math.pow,m=(a,e,t)=>e in a?S(a,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):a[e]=t,z=(a,e)=>{for(var t in e||(e={}))M.call(e,t)&&m(a,t,e[t]);if(w)for(var t of w(e))V.call(e,t)&&m(a,t,e[t]);return a};var y=(a,e)=>{for(var t in e)S(a,t,{get:e[t],enumerable:!0})};var o=(a,e,t)=>(m(a,typeof e!="symbol"?e+"":e,t),t);var d={};y(d,{LinearRegression:()=>b});var b=class{constructor(e){o(this,"options");o(this,"weights");o(this,"bias");o(this,"features");o(this,"labels");o(this,"n");o(this,"batchSize");o(this,"pearson",()=>{if(this.features.length<=0||this.labels.length<=0)return[];let e=[],t=this.labels.reduce((s,i)=>s+i,0)/this.labels.length;for(let s=0;sr[s]),u=n.reduce((r,c)=>r+c,0)/n.length;for(let r=0;r0?this.features[0].length:0,this.weights=b.initZeroArray(this.n),this.weights.length=this.n,this.weights.fill(0),this.bias=0,this.batchSize=(t=this.options.batchSize)!=null?t:this.features.length}static initZeroArray(e){let t=[];return t.length=e,t.fill(0),t}shuffle(){let e=[];for(let t=0;t0;t--){let s=Math.floor(Math.random()*(t+1));[e[t],e[s]]=[e[s],e[t]]}for(let t=this.features.length-1;t>0;t--)[this.features[t],this.features[e[t]]]=[this.features[e[t]],this.features[t]],[this.labels[t],this.labels[e[t]]]=[this.labels[e[t]],this.labels[t]]}gradientDescent(e,t){let s=b.initZeroArray(this.n),i=0;for(let n=0;ni+h)/this.labels.length;for(let i=0;i 0 ? this.features[0].length : 0;\n\n // Initialize weights to zero\n this.weights = LinearRegression.initZeroArray(this.n);\n this.weights.length = this.n;\n this.weights.fill(0);\n\n this.bias = 0;\n\n this.batchSize = this.options.batchSize ?? 
this.features.length;\n }\n\n private static initZeroArray(len: number) {\n const arr: number[] = [];\n arr.length = len;\n arr.fill(0);\n return arr;\n }\n\n private shuffle() {\n const indices: number[] = [];\n for(let i=0; i 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [indices[i], indices[j]] = [indices[j], indices[i]];\n }\n\n for (let i = this.features.length - 1; i > 0; i--) {\n [this.features[i], this.features[indices[i]]] = [this.features[indices[i]], this.features[i]];\n [this.labels[i], this.labels[indices[i]]] = [this.labels[indices[i]], this.labels[i]];\n }\n }\n\n private gradientDescent(batchFeatures: number[][], batchLabels: number[]) : [ number[], number ] {\n\n const mGradientSums = LinearRegression.initZeroArray(this.n);\n let bGradientSum = 0;\n\n for (let i = 0; i < batchFeatures.length; i++) {\n\n const _features: number[] = batchFeatures[i];\n\n const actualValue = batchLabels[i];\n const predictedValue = this.predict(_features);\n const diff = actualValue - predictedValue;\n\n // dE/dm = (-2/n) * sum_from_0_to_n(x * (actual_value - (mx + b)))\n for (let j = 0; j < this.n; j++) {\n mGradientSums[j] += -2 * _features[j] * diff;\n }\n\n // dE/db = (-2/n) * sum_from_0_to_n(actual_value - (mx + b))\n bGradientSum += -2 * diff;\n }\n\n // Update weights and bias using learning rate\n const newWeights = [];\n\n for(let i=0; i sum + x) / this.labels.length; // yMean\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n residualSumOfSquares += (actualValue - predictedValue) ** 2;\n totalSumOfSquares += (actualValue - meanOfActualValues) ** 2;\n }\n\n return 1 - (residualSumOfSquares / totalSumOfSquares);\n }\n\n /**\n * MSE = (1/n) * sum_from_0_to_n((actual_value - (mx + b))^2)\n * The ideal value of Mean Squared Error (MSE) is 0.\n * Achieving an MSE of 0 would mean that the model perfectly predicts the target variable\n * for every data point in the training set. However, it's important to note\n * that achieving an MSE of exactly 0 is extremely rare and often unrealistic, especially with real-world data.\n */\n meanSquaredError() {\n if(this.features.length <= 0) return 0;\n\n let mse = 0;\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n mse += (actualValue - predictedValue) ** 2;\n }\n\n mse /= this.features.length;\n\n return mse;\n }\n\n /**\n * Compute the Pearson correlation coefficient.\n * --------------------------------------------\n * It is a statistical measure that quantifies the strength and direction of the linear relationship\n * between two variables. 
It's commonly used to assess the strength of association\n * between two continuous variables.\n *\n * Range [-1, 1]\n * r=1 indicates a perfect positive linear relationship,\n * meaning that as one variable increases, the other variable increases proportionally.\n *\n * r=\u22121 indicates a perfect negative linear relationship, meaning that as one variable increases,\n * the other variable decreases proportionally.\n *\n * r= 0 indicates no linear relationship between the variables.\n */\n pearson = () : number[] => {\n if (this.features.length <= 0 || this.labels.length <= 0) return [];\n\n const pearsonCoefficients: number[] = [];\n const yMean = this.labels.reduce((sum, y) => sum + y, 0) / this.labels.length;\n\n for (let featureIndex = 0; featureIndex < this.n; featureIndex++) {\n let sumXY = 0; // Sum of the product of (x - xMean) and (y - yMean)\n let sumX2 = 0; // Sum of squared differences between x and xMean\n let sumY2 = 0; // Sum of squared differences between y and yMean\n\n const xValues = this.features.map(feature => feature[featureIndex]);\n const xMean = xValues.reduce((sum, x) => sum + x, 0) / xValues.length;\n\n for (let i = 0; i < this.features.length; i++) {\n const x = this.features[i][featureIndex];\n const y = this.labels[i];\n\n sumXY += (x - xMean) * (y - yMean);\n sumX2 += (x - xMean) ** 2;\n sumY2 += (y - yMean) ** 2;\n }\n\n pearsonCoefficients.push((sumX2 === 0 || sumY2 === 0) ? 0 : (sumXY / Math.sqrt(sumX2 * sumY2)));\n }\n\n return pearsonCoefficients;\n }\n\n}", "import * as LinearRegression from './core/linear-regression';\n\nconst api = {\n ...LinearRegression,\n};\n\ndeclare global {\n interface Window {\n mzMl: typeof api,\n }\n}\n\nwindow.mzMl = window.mzMl || api;\n\nexport * from './core/linear-regression';"], - "mappings": 
";;;;;;4dAAA,IAAAA,EAAA,GAAAC,EAAAD,EAAA,sBAAAE,IAwCO,IAAMC,EAAN,KAAuB,CAY1B,YAAYC,EAAmC,CAV/CC,EAAA,gBACAA,EAAA,gBACAA,EAAA,aAEAA,EAAA,iBACAA,EAAA,eACAA,EAAA,UAEAA,EAAA,kBAyMAA,EAAA,eAAU,IAAiB,CACvB,GAAI,KAAK,SAAS,QAAU,GAAK,KAAK,OAAO,QAAU,EAAG,MAAO,CAAC,EAElE,IAAMC,EAAgC,CAAC,EACjCC,EAAQ,KAAK,OAAO,OAAO,CAACC,EAAKC,IAAMD,EAAMC,EAAG,CAAC,EAAI,KAAK,OAAO,OAEvE,QAASC,EAAe,EAAGA,EAAe,KAAK,EAAGA,IAAgB,CAC9D,IAAIC,EAAQ,EACRC,EAAQ,EACRC,EAAQ,EAENC,EAAU,KAAK,SAAS,IAAIC,GAAWA,EAAQL,CAAY,CAAC,EAC5DM,EAAQF,EAAQ,OAAO,CAACN,EAAKS,IAAMT,EAAMS,EAAG,CAAC,EAAIH,EAAQ,OAE/D,QAASI,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMD,EAAI,KAAK,SAASC,CAAC,EAAER,CAAY,EACjCD,EAAI,KAAK,OAAOS,CAAC,EAEvBP,IAAUM,EAAID,IAAUP,EAAIF,GAC5BK,GAAUO,EAAAF,EAAID,EAAU,GACxBH,GAAUM,EAAAV,EAAIF,EAAU,EAC5B,CAEAD,EAAoB,KAAMM,IAAU,GAAKC,IAAU,EAAK,EAAKF,EAAQ,KAAK,KAAKC,EAAQC,CAAK,CAAE,CAClG,CAEA,OAAOP,CACX,GAtRJ,IAAAc,EAqDQ,KAAK,QAAUhB,EAEf,KAAK,SAAW,CAAC,GAAG,KAAK,QAAQ,QAAQ,EACzC,KAAK,OAAS,CAAC,GAAG,KAAK,QAAQ,MAAM,EACrC,KAAK,EAAI,KAAK,SAAS,OAAS,EAAI,KAAK,SAAS,CAAC,EAAE,OAAS,EAG9D,KAAK,QAAUD,EAAiB,cAAc,KAAK,CAAC,EACpD,KAAK,QAAQ,OAAS,KAAK,EAC3B,KAAK,QAAQ,KAAK,CAAC,EAEnB,KAAK,KAAO,EAEZ,KAAK,WAAYiB,EAAA,KAAK,QAAQ,YAAb,KAAAA,EAA0B,KAAK,SAAS,MAC7D,CAEA,OAAe,cAAcC,EAAa,CACtC,IAAMC,EAAgB,CAAC,EACvB,OAAAA,EAAI,OAASD,EACbC,EAAI,KAAK,CAAC,EACHA,CACX,CAEQ,SAAU,CACd,IAAMC,EAAoB,CAAC,EAC3B,QAAQL,EAAE,EAAGA,EAAE,KAAK,EAAGA,IACnBK,EAAQ,KAAKL,CAAC,EAGlB,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAAK,CAC/C,IAAMM,EAAI,KAAK,MAAM,KAAK,OAAO,GAAKN,EAAI,EAAE,EAC5C,CAACK,EAAQL,CAAC,EAAGK,EAAQC,CAAC,CAAC,EAAI,CAACD,EAAQC,CAAC,EAAGD,EAAQL,CAAC,CAAC,CACtD,CAEA,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAC1C,CAAC,KAAK,SAASA,CAAC,EAAG,KAAK,SAASK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,SAASK,EAAQL,CAAC,CAAC,EAAG,KAAK,SAASA,CAAC,CAAC,EAC5F,CAAC,KAAK,OAAOA,CAAC,EAAG,KAAK,OAAOK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,OAAOK,EAAQL,CAAC,CAAC,EAAG,KAAK,OAAOA,CAAC,CAAC,CAE5F,CAEQ,gBAAgBO,EAA2BC,EAA8C,CAE7F,IAAMC,EAAgBxB,EAAiB,cAAc,KAAK,CAAC,EACvDyB,EAAe,EAEnB,QAASV,EAAI,EAAGA,EAAIO,EAAc,OAAQP,IAAK,CAE3C,IAAMW,EAAsBJ,EAAcP,CAAC,EAErCY,EAAcJ,EAAYR,CAAC,EAC3Ba,EAAiB,KAAK,QAAQF,CAAS,EACvCG,EAAOF,EAAcC,EAG3B,QAASP,EAAI,EAAGA,EAAI,KAAK,EAAGA,IACxBG,EAAcH,CAAC,GAAK,GAAKK,EAAUL,CAAC,EAAIQ,EAI5CJ,GAAgB,GAAKI,CACzB,CAGA,IAAMC,EAAa,CAAC,EAEpB,QAAQf,EAAE,EAAGA,EAAE,KAAK,QAAQ,OAAQA,IAAK,CAIrC,IAAMgB,EAHU,KAAK,QAAQhB,CAAC,EAGD,KAAK,QAAQ,aAAe,KAAK,UAAaS,EAAcT,CAAC,EAC1Fe,EAAW,KAAKC,CAAS,CAC7B,CAGA,IAAMC,EAAU,KAAK,KAAQ,KAAK,QAAQ,aAAe,KAAK,UAAaP,EAE3E,MAAO,CAACK,EAAYE,CAAO,CAC/B,CAEA,OAAQ,CACJ,QAAQjB,EAAI,EAAGA,EAAI,KAAK,QAAQ,OAAQA,IAAK,CAErC,KAAK,QAAQ,SACb,KAAK,QAAQ,EAIjB,QAASM,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,GAAK,KAAK,UAAW,CAE3D,IAAMC,EAAgB,KAAK,SAAS,MAAMD,EAAGA,EAAI,KAAK,SAAS,EACzDE,EAAc,KAAK,OAAO,MAAMF,EAAGA,EAAI,KAAK,SAAS,EAErD,CAACS,EAAYE,CAAO,EAAI,KAAK,gBAAgBV,EAAeC,CAAW,EAEzE,OAAO,KAAK,QAAQ,gBAAmB,YACvC,KAAK,QAAQ,eAAeR,EAAG,KAAK,QAAQ,OAAQe,EAAYE,CAAO,EAG3E,KAAK,QAAUF,EACf,KAAK,KAAOE,CAChB,CACJ,CAEA,MAAO,CAAC,KAAK,QAAS,KAAK,IAAI,CACnC,CAKA,QAAQC,EAAoB,CAExB,GAAIA,EAAS,SAAW,KAAK,QAAQ,OACjC,MAAM,IAAI,MAAM,0DAA0D,EAK9E,IAAIC,EAAa,KAAK,KAEtB,QAASnB,EAAI,EAAGA,EAAIkB,EAAS,OAAQlB,IACjCmB,GAAcD,EAASlB,CAAC,EAAI,KAAK,QAAQA,CAAC,EAG9C,OAAOmB,CACX,CAgBA,UAAW,CACP,IAAIC,EAAuB,EACvBC,EAAoB,EAElBC,EAAqB,KAAK,OAAO,QAAU,EAAI,EACjD,KAAK,OAAO,OAAO,CAAChC,EAAKS,IAAMT,EAAMS,CAAC,EAAI,KAAK,OAAO,OAE1D,QAAS,EAAI,EAAG,EAAI,KAAK,SAAS,OAAQ,IAAK,CAC3C,IAAMa,EAAc,KAAK,OAAO,CAAC,EAC3BC,EAAiB,KAAK,QAAQ,KAAK,SAAS,CAAC,CAAC,EAEpDO,GAAyBnB,EAAAW,EAAcC,EAAmB,GAC1DQ,GAAsBpB,EAAAW,EAAcU,EAAuB,EAC/D,CAEA,MAAO,GAAKF,EAAuBC,CAC
vC,CASA,kBAAmB,CACf,GAAG,KAAK,SAAS,QAAU,EAAG,MAAO,GAErC,IAAIE,EAAM,EAEV,QAASvB,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMY,EAAc,KAAK,OAAOZ,CAAC,EAC3Ba,EAAiB,KAAK,QAAQ,KAAK,SAASb,CAAC,CAAC,EAEpDuB,GAAQtB,EAAAW,EAAcC,EAAmB,EAC7C,CAEA,OAAAU,GAAO,KAAK,SAAS,OAEdA,CACX,CA+CJ,ECtRA,IAAMC,EAAMC,EAAA,GACLC,GASP,OAAO,KAAO,OAAO,MAAQF", + "sourcesContent": ["import { ILinearRegressionOptions } from '../interfaces';\n\n/**\n * Linear Regression\n *\n * Mean Squared Error (MSE): Error function = Loss function\n * E = (1/n) * sum_from_0_to_n((actual_value - predicted_value)^2)\n * E = (1/n) * sum_from_0_to_n((actual_value - (mx + b))^2)\n * ---------------------------------------------------------\n * Goal: Minimize the error function - find (m, b) with the lowest possible E.\n * How:\n *\n * - Take partial derivative with respect m and also with respect b.\n * This helps to find the \"m\" that maximally increase E,\n * and \"b\" that maximally increase E (the steepest ascent).\n *\n * - After we found them, we get the opposite direction\n * to find the way to decrease E (the steepest descent).\n * ---------------------------------------------------------\n *\n * How to calculate partial derivative of \"m\"?\n * dE/dm = (1/n) * sum_from_0_to_n(2 * (actual_value - (mx + b)) * (-x))\n * dE/dm = (-2/n) * sum_from_0_to_n(x * (actual_value - (mx + b)))\n * ---------------------------------------------------------\n *\n * How to calculate partial derivative of \"b\"?\n * dE/db = (-2/n) * sum_from_0_to_n(actual_value - (mx + b))\n * ---------------------------------------------------------\n *\n * After the derivatives are found (the steepest ascent)\n * we need to find the steepest descent:\n *\n * new_m = current_m - learning_rate * dE/dm\n * new_b = current_b - learning_rate * dE/db\n *\n * General Form:\n * ------------\n * y = w1*x1 + w2*x2 + \u2026 + wn*xn + b\n * [w1, ..., wn] = weights, b = bias\n */\nexport class LinearRegression {\n\n options: ILinearRegressionOptions;\n weights: number[];\n bias: number;\n\n features: number[][];\n labels: number[];\n n: number;\n\n batchSize: number;\n\n constructor(options: ILinearRegressionOptions) {\n this.options = options;\n\n this.features = [...this.options.features];\n this.labels = [...this.options.labels];\n this.n = this.features.length > 0 ? this.features[0].length : 0;\n\n // Initialize weights to zero\n this.weights = LinearRegression.initZeroArray(this.n);\n this.weights.length = this.n;\n this.weights.fill(0);\n\n this.bias = 0;\n\n this.batchSize = this.options.batchSize ?? 
this.features.length;\n }\n\n private static initZeroArray(len: number) {\n const arr: number[] = [];\n arr.length = len;\n arr.fill(0);\n return arr;\n }\n\n private shuffle() {\n const indices: number[] = [];\n for(let i=0; i 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [indices[i], indices[j]] = [indices[j], indices[i]];\n }\n\n for (let i = this.features.length - 1; i > 0; i--) {\n [this.features[i], this.features[indices[i]]] = [this.features[indices[i]], this.features[i]];\n [this.labels[i], this.labels[indices[i]]] = [this.labels[indices[i]], this.labels[i]];\n }\n }\n\n private gradientDescent(batchFeatures: number[][], batchLabels: number[]) : [ number[], number ] {\n\n const mGradientSums = LinearRegression.initZeroArray(this.n);\n let bGradientSum = 0;\n\n for (let i = 0; i < batchFeatures.length; i++) {\n\n const _features: number[] = batchFeatures[i];\n\n const actualValue = batchLabels[i];\n const predictedValue = this.predict(_features);\n const diff = actualValue - predictedValue;\n\n // dE/dm = (-2/n) * sum_from_0_to_n(x * (actual_value - (mx + b)))\n for (let j = 0; j < this.n; j++) {\n mGradientSums[j] += -2 * _features[j] * diff;\n }\n\n // dE/db = (-2/n) * sum_from_0_to_n(actual_value - (mx + b))\n bGradientSum += -2 * diff;\n }\n\n // Update weights and bias using learning rate\n const newWeights = [];\n\n for(let i=0; i sum + x) / this.labels.length; // yMean\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n residualSumOfSquares += (actualValue - predictedValue) ** 2;\n totalSumOfSquares += (actualValue - meanOfActualValues) ** 2;\n }\n\n return 1 - (residualSumOfSquares / totalSumOfSquares);\n }\n\n /**\n * MSE = (1/n) * sum_from_0_to_n((actual_value - (mx + b))^2)\n * The ideal value of Mean Squared Error (MSE) is 0.\n * Achieving an MSE of 0 would mean that the model perfectly predicts the target variable\n * for every data point in the training set. However, it's important to note\n * that achieving an MSE of exactly 0 is extremely rare and often unrealistic, especially with real-world data.\n */\n meanSquaredError() {\n if(this.features.length <= 0) return 0;\n\n let mse = 0;\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n mse += (actualValue - predictedValue) ** 2;\n }\n\n mse /= this.features.length;\n\n return mse;\n }\n\n /**\n * Compute the Pearson correlation coefficient.\n * --------------------------------------------\n * It is a statistical measure that quantifies the strength and direction of the linear relationship\n * between two variables. 
It's commonly used to assess the strength of association\n * between two continuous variables.\n *\n * Range [-1, 1]\n * r=1 indicates a perfect positive linear relationship,\n * meaning that as one variable increases, the other variable increases proportionally.\n *\n * r=\u22121 indicates a perfect negative linear relationship, meaning that as one variable increases,\n * the other variable decreases proportionally.\n *\n * r= 0 indicates no linear relationship between the variables.\n */\n pearson = () : number[] => {\n if (this.features.length <= 0 || this.labels.length <= 0) return [];\n\n const pearsonCoefficients: number[] = [];\n const yMean = this.labels.reduce((sum, y) => sum + y, 0) / this.labels.length;\n\n for (let featureIndex = 0; featureIndex < this.n; featureIndex++) {\n let sumXY = 0; // Sum of the product of (x - xMean) and (y - yMean)\n let sumX2 = 0; // Sum of squared differences between x and xMean\n let sumY2 = 0; // Sum of squared differences between y and yMean\n\n const xValues = this.features.map(feature => feature[featureIndex]);\n const xMean = xValues.reduce((sum, x) => sum + x, 0) / xValues.length;\n\n for (let i = 0; i < this.features.length; i++) {\n const x = this.features[i][featureIndex];\n const y = this.labels[i];\n\n sumXY += (x - xMean) * (y - yMean);\n sumX2 += (x - xMean) ** 2;\n sumY2 += (y - yMean) ** 2;\n }\n\n pearsonCoefficients.push((sumX2 === 0 || sumY2 === 0) ? 0 : (sumXY / Math.sqrt(sumX2 * sumY2)));\n }\n\n return pearsonCoefficients;\n }\n\n}", "import * as LinearRegression from './core/linear-regression';\n\nconst api = {\n ...LinearRegression,\n};\n\ndeclare global {\n interface Window {\n mzMl: typeof api,\n }\n}\n\nwindow.mzMl = window.mzMl || api;\n\nexport * from './core/linear-regression';"], + "mappings": 
";;;;;;4dAAA,IAAAA,EAAA,GAAAC,EAAAD,EAAA,sBAAAE,IAwCO,IAAMC,EAAN,KAAuB,CAY1B,YAAYC,EAAmC,CAV/CC,EAAA,gBACAA,EAAA,gBACAA,EAAA,aAEAA,EAAA,iBACAA,EAAA,eACAA,EAAA,UAEAA,EAAA,kBAyMAA,EAAA,eAAU,IAAiB,CACvB,GAAI,KAAK,SAAS,QAAU,GAAK,KAAK,OAAO,QAAU,EAAG,MAAO,CAAC,EAElE,IAAMC,EAAgC,CAAC,EACjCC,EAAQ,KAAK,OAAO,OAAO,CAACC,EAAKC,IAAMD,EAAMC,EAAG,CAAC,EAAI,KAAK,OAAO,OAEvE,QAASC,EAAe,EAAGA,EAAe,KAAK,EAAGA,IAAgB,CAC9D,IAAIC,EAAQ,EACRC,EAAQ,EACRC,EAAQ,EAENC,EAAU,KAAK,SAAS,IAAIC,GAAWA,EAAQL,CAAY,CAAC,EAC5DM,EAAQF,EAAQ,OAAO,CAACN,EAAKS,IAAMT,EAAMS,EAAG,CAAC,EAAIH,EAAQ,OAE/D,QAASI,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMD,EAAI,KAAK,SAASC,CAAC,EAAER,CAAY,EACjCD,EAAI,KAAK,OAAOS,CAAC,EAEvBP,IAAUM,EAAID,IAAUP,EAAIF,GAC5BK,GAAUO,EAAAF,EAAID,EAAU,GACxBH,GAAUM,EAAAV,EAAIF,EAAU,EAC5B,CAEAD,EAAoB,KAAMM,IAAU,GAAKC,IAAU,EAAK,EAAKF,EAAQ,KAAK,KAAKC,EAAQC,CAAK,CAAE,CAClG,CAEA,OAAOP,CACX,GAtRJ,IAAAc,EAqDQ,KAAK,QAAUhB,EAEf,KAAK,SAAW,CAAC,GAAG,KAAK,QAAQ,QAAQ,EACzC,KAAK,OAAS,CAAC,GAAG,KAAK,QAAQ,MAAM,EACrC,KAAK,EAAI,KAAK,SAAS,OAAS,EAAI,KAAK,SAAS,CAAC,EAAE,OAAS,EAG9D,KAAK,QAAUD,EAAiB,cAAc,KAAK,CAAC,EACpD,KAAK,QAAQ,OAAS,KAAK,EAC3B,KAAK,QAAQ,KAAK,CAAC,EAEnB,KAAK,KAAO,EAEZ,KAAK,WAAYiB,EAAA,KAAK,QAAQ,YAAb,KAAAA,EAA0B,KAAK,SAAS,MAC7D,CAEA,OAAe,cAAcC,EAAa,CACtC,IAAMC,EAAgB,CAAC,EACvB,OAAAA,EAAI,OAASD,EACbC,EAAI,KAAK,CAAC,EACHA,CACX,CAEQ,SAAU,CACd,IAAMC,EAAoB,CAAC,EAC3B,QAAQL,EAAE,EAAGA,EAAE,KAAK,EAAGA,IACnBK,EAAQ,KAAKL,CAAC,EAGlB,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAAK,CAC/C,IAAMM,EAAI,KAAK,MAAM,KAAK,OAAO,GAAKN,EAAI,EAAE,EAC5C,CAACK,EAAQL,CAAC,EAAGK,EAAQC,CAAC,CAAC,EAAI,CAACD,EAAQC,CAAC,EAAGD,EAAQL,CAAC,CAAC,CACtD,CAEA,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAC1C,CAAC,KAAK,SAASA,CAAC,EAAG,KAAK,SAASK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,SAASK,EAAQL,CAAC,CAAC,EAAG,KAAK,SAASA,CAAC,CAAC,EAC5F,CAAC,KAAK,OAAOA,CAAC,EAAG,KAAK,OAAOK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,OAAOK,EAAQL,CAAC,CAAC,EAAG,KAAK,OAAOA,CAAC,CAAC,CAE5F,CAEQ,gBAAgBO,EAA2BC,EAA8C,CAE7F,IAAMC,EAAgBxB,EAAiB,cAAc,KAAK,CAAC,EACvDyB,EAAe,EAEnB,QAASV,EAAI,EAAGA,EAAIO,EAAc,OAAQP,IAAK,CAE3C,IAAMW,EAAsBJ,EAAcP,CAAC,EAErCY,EAAcJ,EAAYR,CAAC,EAC3Ba,EAAiB,KAAK,QAAQF,CAAS,EACvCG,EAAOF,EAAcC,EAG3B,QAASP,EAAI,EAAGA,EAAI,KAAK,EAAGA,IACxBG,EAAcH,CAAC,GAAK,GAAKK,EAAUL,CAAC,EAAIQ,EAI5CJ,GAAgB,GAAKI,CACzB,CAGA,IAAMC,EAAa,CAAC,EAEpB,QAAQf,EAAE,EAAGA,EAAE,KAAK,QAAQ,OAAQA,IAAK,CAIrC,IAAMgB,EAHU,KAAK,QAAQhB,CAAC,EAGD,KAAK,QAAQ,aAAe,KAAK,UAAaS,EAAcT,CAAC,EAC1Fe,EAAW,KAAKC,CAAS,CAC7B,CAGA,IAAMC,EAAU,KAAK,KAAQ,KAAK,QAAQ,aAAe,KAAK,UAAaP,EAE3E,MAAO,CAACK,EAAYE,CAAO,CAC/B,CAEA,KAAM,CACF,QAAQjB,EAAI,EAAGA,EAAI,KAAK,QAAQ,OAAQA,IAAK,CAErC,KAAK,QAAQ,SACb,KAAK,QAAQ,EAIjB,QAASM,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,GAAK,KAAK,UAAW,CAE3D,IAAMC,EAAgB,KAAK,SAAS,MAAMD,EAAGA,EAAI,KAAK,SAAS,EACzDE,EAAc,KAAK,OAAO,MAAMF,EAAGA,EAAI,KAAK,SAAS,EAErD,CAACS,EAAYE,CAAO,EAAI,KAAK,gBAAgBV,EAAeC,CAAW,EAEzE,OAAO,KAAK,QAAQ,gBAAmB,YACvC,KAAK,QAAQ,eAAeR,EAAG,KAAK,QAAQ,OAAQe,EAAYE,CAAO,EAG3E,KAAK,QAAUF,EACf,KAAK,KAAOE,CAChB,CACJ,CAEA,MAAO,CAAC,KAAK,QAAS,KAAK,IAAI,CACnC,CAKA,QAAQC,EAAoB,CAExB,GAAIA,EAAS,SAAW,KAAK,QAAQ,OACjC,MAAM,IAAI,MAAM,0DAA0D,EAK9E,IAAIC,EAAa,KAAK,KAEtB,QAASnB,EAAI,EAAGA,EAAIkB,EAAS,OAAQlB,IACjCmB,GAAcD,EAASlB,CAAC,EAAI,KAAK,QAAQA,CAAC,EAG9C,OAAOmB,CACX,CAgBA,UAAW,CACP,IAAIC,EAAuB,EACvBC,EAAoB,EAElBC,EAAqB,KAAK,OAAO,QAAU,EAAI,EACjD,KAAK,OAAO,OAAO,CAAChC,EAAKS,IAAMT,EAAMS,CAAC,EAAI,KAAK,OAAO,OAE1D,QAAS,EAAI,EAAG,EAAI,KAAK,SAAS,OAAQ,IAAK,CAC3C,IAAMa,EAAc,KAAK,OAAO,CAAC,EAC3BC,EAAiB,KAAK,QAAQ,KAAK,SAAS,CAAC,CAAC,EAEpDO,GAAyBnB,EAAAW,EAAcC,EAAmB,GAC1DQ,GAAsBpB,EAAAW,EAAcU,EAAuB,EAC/D,CAEA,MAAO,GAAKF,EAAuBC,CAC
vC,CASA,kBAAmB,CACf,GAAG,KAAK,SAAS,QAAU,EAAG,MAAO,GAErC,IAAIE,EAAM,EAEV,QAASvB,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMY,EAAc,KAAK,OAAOZ,CAAC,EAC3Ba,EAAiB,KAAK,QAAQ,KAAK,SAASb,CAAC,CAAC,EAEpDuB,GAAQtB,EAAAW,EAAcC,EAAmB,EAC7C,CAEA,OAAAU,GAAO,KAAK,SAAS,OAEdA,CACX,CA+CJ,ECtRA,IAAMC,EAAMC,EAAA,GACLC,GASP,OAAO,KAAO,OAAO,MAAQF", "names": ["linear_regression_exports", "__export", "LinearRegression", "LinearRegression", "options", "__publicField", "pearsonCoefficients", "yMean", "sum", "y", "featureIndex", "sumXY", "sumX2", "sumY2", "xValues", "feature", "xMean", "x", "i", "__pow", "_a", "len", "arr", "indices", "j", "batchFeatures", "batchLabels", "mGradientSums", "bGradientSum", "_features", "actualValue", "predictedValue", "diff", "newWeights", "gradientM", "newBias", "features", "prediction", "residualSumOfSquares", "totalSumOfSquares", "meanOfActualValues", "mse", "api", "__spreadValues", "linear_regression_exports"] } diff --git a/dist/mz-ml.node.cjs b/dist/mz-ml.node.cjs index 3f64840..f3c4fe0 100644 --- a/dist/mz-ml.node.cjs +++ b/dist/mz-ml.node.cjs @@ -4,5 +4,5 @@ A collection of TypeScript-based ML helpers. https://github.com/mzusin/mz-ml Copyright (c) 2023-present, Miriam Zusin */ -var m=Object.defineProperty;var d=Object.getOwnPropertyDescriptor;var w=Object.getOwnPropertyNames;var S=Object.prototype.hasOwnProperty;var f=Math.pow,z=(n,e,t)=>e in n?m(n,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):n[e]=t;var V=(n,e)=>{for(var t in e)m(n,t,{get:e[t],enumerable:!0})},x=(n,e,t,s)=>{if(e&&typeof e=="object"||typeof e=="function")for(let i of w(e))!S.call(n,i)&&i!==t&&m(n,i,{get:()=>e[i],enumerable:!(s=d(e,i))||s.enumerable});return n};var y=n=>x(m({},"__esModule",{value:!0}),n);var o=(n,e,t)=>(z(n,typeof e!="symbol"?e+"":e,t),t);var M={};V(M,{LinearRegression:()=>b});module.exports=y(M);var b=class{constructor(e){o(this,"options");o(this,"weights");o(this,"bias");o(this,"features");o(this,"labels");o(this,"n");o(this,"batchSize");o(this,"pearson",()=>{if(this.features.length<=0||this.labels.length<=0)return[];let e=[],t=this.labels.reduce((s,i)=>s+i,0)/this.labels.length;for(let s=0;sa[s]),u=r.reduce((a,c)=>a+c,0)/r.length;for(let a=0;a0?this.features[0].length:0,this.weights=b.initZeroArray(this.n),this.weights.length=this.n,this.weights.fill(0),this.bias=0,this.batchSize=(t=this.options.batchSize)!=null?t:this.features.length}static initZeroArray(e){let t=[];return t.length=e,t.fill(0),t}shuffle(){let e=[];for(let t=0;t0;t--){let s=Math.floor(Math.random()*(t+1));[e[t],e[s]]=[e[s],e[t]]}for(let t=this.features.length-1;t>0;t--)[this.features[t],this.features[e[t]]]=[this.features[e[t]],this.features[t]],[this.labels[t],this.labels[e[t]]]=[this.labels[e[t]],this.labels[t]]}gradientDescent(e,t){let s=b.initZeroArray(this.n),i=0;for(let r=0;ri+h)/this.labels.length;for(let i=0;ie in n?m(n,e,{enumerable:!0,configurable:!0,writable:!0,value:t}):n[e]=t;var V=(n,e)=>{for(var t in e)m(n,t,{get:e[t],enumerable:!0})},x=(n,e,t,s)=>{if(e&&typeof e=="object"||typeof e=="function")for(let i of w(e))!S.call(n,i)&&i!==t&&m(n,i,{get:()=>e[i],enumerable:!(s=d(e,i))||s.enumerable});return n};var y=n=>x(m({},"__esModule",{value:!0}),n);var o=(n,e,t)=>(z(n,typeof e!="symbol"?e+"":e,t),t);var M={};V(M,{LinearRegression:()=>b});module.exports=y(M);var b=class{constructor(e){o(this,"options");o(this,"weights");o(this,"bias");o(this,"features");o(this,"labels");o(this,"n");o(this,"batchSize");o(this,"pearson",()=>{if(this.features.length<=0||this.labels.length<=0)return[];let 
e=[],t=this.labels.reduce((s,i)=>s+i,0)/this.labels.length;for(let s=0;sa[s]),u=r.reduce((a,c)=>a+c,0)/r.length;for(let a=0;a0?this.features[0].length:0,this.weights=b.initZeroArray(this.n),this.weights.length=this.n,this.weights.fill(0),this.bias=0,this.batchSize=(t=this.options.batchSize)!=null?t:this.features.length}static initZeroArray(e){let t=[];return t.length=e,t.fill(0),t}shuffle(){let e=[];for(let t=0;t0;t--){let s=Math.floor(Math.random()*(t+1));[e[t],e[s]]=[e[s],e[t]]}for(let t=this.features.length-1;t>0;t--)[this.features[t],this.features[e[t]]]=[this.features[e[t]],this.features[t]],[this.labels[t],this.labels[e[t]]]=[this.labels[e[t]],this.labels[t]]}gradientDescent(e,t){let s=b.initZeroArray(this.n),i=0;for(let r=0;ri+h)/this.labels.length;for(let i=0;i 0 ? this.features[0].length : 0;\n\n // Initialize weights to zero\n this.weights = LinearRegression.initZeroArray(this.n);\n this.weights.length = this.n;\n this.weights.fill(0);\n\n this.bias = 0;\n\n this.batchSize = this.options.batchSize ?? this.features.length;\n }\n\n private static initZeroArray(len: number) {\n const arr: number[] = [];\n arr.length = len;\n arr.fill(0);\n return arr;\n }\n\n private shuffle() {\n const indices: number[] = [];\n for(let i=0; i 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [indices[i], indices[j]] = [indices[j], indices[i]];\n }\n\n for (let i = this.features.length - 1; i > 0; i--) {\n [this.features[i], this.features[indices[i]]] = [this.features[indices[i]], this.features[i]];\n [this.labels[i], this.labels[indices[i]]] = [this.labels[indices[i]], this.labels[i]];\n }\n }\n\n private gradientDescent(batchFeatures: number[][], batchLabels: number[]) : [ number[], number ] {\n\n const mGradientSums = LinearRegression.initZeroArray(this.n);\n let bGradientSum = 0;\n\n for (let i = 0; i < batchFeatures.length; i++) {\n\n const _features: number[] = batchFeatures[i];\n\n const actualValue = batchLabels[i];\n const predictedValue = this.predict(_features);\n const diff = actualValue - predictedValue;\n\n // dE/dm = (-2/n) * sum_from_0_to_n(x * (actual_value - (mx + b)))\n for (let j = 0; j < this.n; j++) {\n mGradientSums[j] += -2 * _features[j] * diff;\n }\n\n // dE/db = (-2/n) * sum_from_0_to_n(actual_value - (mx + b))\n bGradientSum += -2 * diff;\n }\n\n // Update weights and bias using learning rate\n const newWeights = [];\n\n for(let i=0; i sum + x) / this.labels.length; // yMean\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n residualSumOfSquares += (actualValue - predictedValue) ** 2;\n totalSumOfSquares += (actualValue - meanOfActualValues) ** 2;\n }\n\n return 1 - (residualSumOfSquares / totalSumOfSquares);\n }\n\n /**\n * MSE = (1/n) * sum_from_0_to_n((actual_value - (mx + b))^2)\n * The ideal value of Mean Squared Error (MSE) is 0.\n * Achieving an MSE of 0 would mean that the model perfectly predicts the target variable\n * for every data point in the training set. 
However, it's important to note\n * that achieving an MSE of exactly 0 is extremely rare and often unrealistic, especially with real-world data.\n */\n meanSquaredError() {\n if(this.features.length <= 0) return 0;\n\n let mse = 0;\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n mse += (actualValue - predictedValue) ** 2;\n }\n\n mse /= this.features.length;\n\n return mse;\n }\n\n /**\n * Compute the Pearson correlation coefficient.\n * --------------------------------------------\n * It is a statistical measure that quantifies the strength and direction of the linear relationship\n * between two variables. It's commonly used to assess the strength of association\n * between two continuous variables.\n *\n * Range [-1, 1]\n * r=1 indicates a perfect positive linear relationship,\n * meaning that as one variable increases, the other variable increases proportionally.\n *\n * r=\u22121 indicates a perfect negative linear relationship, meaning that as one variable increases,\n * the other variable decreases proportionally.\n *\n * r= 0 indicates no linear relationship between the variables.\n */\n pearson = () : number[] => {\n if (this.features.length <= 0 || this.labels.length <= 0) return [];\n\n const pearsonCoefficients: number[] = [];\n const yMean = this.labels.reduce((sum, y) => sum + y, 0) / this.labels.length;\n\n for (let featureIndex = 0; featureIndex < this.n; featureIndex++) {\n let sumXY = 0; // Sum of the product of (x - xMean) and (y - yMean)\n let sumX2 = 0; // Sum of squared differences between x and xMean\n let sumY2 = 0; // Sum of squared differences between y and yMean\n\n const xValues = this.features.map(feature => feature[featureIndex]);\n const xMean = xValues.reduce((sum, x) => sum + x, 0) / xValues.length;\n\n for (let i = 0; i < this.features.length; i++) {\n const x = this.features[i][featureIndex];\n const y = this.labels[i];\n\n sumXY += (x - xMean) * (y - yMean);\n sumX2 += (x - xMean) ** 2;\n sumY2 += (y - yMean) ** 2;\n }\n\n pearsonCoefficients.push((sumX2 === 0 || sumY2 === 0) ? 
0 : (sumXY / Math.sqrt(sumX2 * sumY2)));\n }\n\n return pearsonCoefficients;\n }\n\n}"], - "mappings": ";;;;;;mjBAAA,IAAAA,EAAA,GAAAC,EAAAD,EAAA,sBAAAE,IAAA,eAAAC,EAAAH,GCwCO,IAAMI,EAAN,KAAuB,CAY1B,YAAYC,EAAmC,CAV/CC,EAAA,gBACAA,EAAA,gBACAA,EAAA,aAEAA,EAAA,iBACAA,EAAA,eACAA,EAAA,UAEAA,EAAA,kBAyMAA,EAAA,eAAU,IAAiB,CACvB,GAAI,KAAK,SAAS,QAAU,GAAK,KAAK,OAAO,QAAU,EAAG,MAAO,CAAC,EAElE,IAAMC,EAAgC,CAAC,EACjCC,EAAQ,KAAK,OAAO,OAAO,CAACC,EAAKC,IAAMD,EAAMC,EAAG,CAAC,EAAI,KAAK,OAAO,OAEvE,QAASC,EAAe,EAAGA,EAAe,KAAK,EAAGA,IAAgB,CAC9D,IAAIC,EAAQ,EACRC,EAAQ,EACRC,EAAQ,EAENC,EAAU,KAAK,SAAS,IAAIC,GAAWA,EAAQL,CAAY,CAAC,EAC5DM,EAAQF,EAAQ,OAAO,CAACN,EAAKS,IAAMT,EAAMS,EAAG,CAAC,EAAIH,EAAQ,OAE/D,QAASI,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMD,EAAI,KAAK,SAASC,CAAC,EAAER,CAAY,EACjCD,EAAI,KAAK,OAAOS,CAAC,EAEvBP,IAAUM,EAAID,IAAUP,EAAIF,GAC5BK,GAAUO,EAAAF,EAAID,EAAU,GACxBH,GAAUM,EAAAV,EAAIF,EAAU,EAC5B,CAEAD,EAAoB,KAAMM,IAAU,GAAKC,IAAU,EAAK,EAAKF,EAAQ,KAAK,KAAKC,EAAQC,CAAK,CAAE,CAClG,CAEA,OAAOP,CACX,GAtRJ,IAAAc,EAqDQ,KAAK,QAAUhB,EAEf,KAAK,SAAW,CAAC,GAAG,KAAK,QAAQ,QAAQ,EACzC,KAAK,OAAS,CAAC,GAAG,KAAK,QAAQ,MAAM,EACrC,KAAK,EAAI,KAAK,SAAS,OAAS,EAAI,KAAK,SAAS,CAAC,EAAE,OAAS,EAG9D,KAAK,QAAUD,EAAiB,cAAc,KAAK,CAAC,EACpD,KAAK,QAAQ,OAAS,KAAK,EAC3B,KAAK,QAAQ,KAAK,CAAC,EAEnB,KAAK,KAAO,EAEZ,KAAK,WAAYiB,EAAA,KAAK,QAAQ,YAAb,KAAAA,EAA0B,KAAK,SAAS,MAC7D,CAEA,OAAe,cAAcC,EAAa,CACtC,IAAMC,EAAgB,CAAC,EACvB,OAAAA,EAAI,OAASD,EACbC,EAAI,KAAK,CAAC,EACHA,CACX,CAEQ,SAAU,CACd,IAAMC,EAAoB,CAAC,EAC3B,QAAQL,EAAE,EAAGA,EAAE,KAAK,EAAGA,IACnBK,EAAQ,KAAKL,CAAC,EAGlB,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAAK,CAC/C,IAAMM,EAAI,KAAK,MAAM,KAAK,OAAO,GAAKN,EAAI,EAAE,EAC5C,CAACK,EAAQL,CAAC,EAAGK,EAAQC,CAAC,CAAC,EAAI,CAACD,EAAQC,CAAC,EAAGD,EAAQL,CAAC,CAAC,CACtD,CAEA,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAC1C,CAAC,KAAK,SAASA,CAAC,EAAG,KAAK,SAASK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,SAASK,EAAQL,CAAC,CAAC,EAAG,KAAK,SAASA,CAAC,CAAC,EAC5F,CAAC,KAAK,OAAOA,CAAC,EAAG,KAAK,OAAOK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,OAAOK,EAAQL,CAAC,CAAC,EAAG,KAAK,OAAOA,CAAC,CAAC,CAE5F,CAEQ,gBAAgBO,EAA2BC,EAA8C,CAE7F,IAAMC,EAAgBxB,EAAiB,cAAc,KAAK,CAAC,EACvDyB,EAAe,EAEnB,QAASV,EAAI,EAAGA,EAAIO,EAAc,OAAQP,IAAK,CAE3C,IAAMW,EAAsBJ,EAAcP,CAAC,EAErCY,EAAcJ,EAAYR,CAAC,EAC3Ba,EAAiB,KAAK,QAAQF,CAAS,EACvCG,EAAOF,EAAcC,EAG3B,QAASP,EAAI,EAAGA,EAAI,KAAK,EAAGA,IACxBG,EAAcH,CAAC,GAAK,GAAKK,EAAUL,CAAC,EAAIQ,EAI5CJ,GAAgB,GAAKI,CACzB,CAGA,IAAMC,EAAa,CAAC,EAEpB,QAAQf,EAAE,EAAGA,EAAE,KAAK,QAAQ,OAAQA,IAAK,CAIrC,IAAMgB,EAHU,KAAK,QAAQhB,CAAC,EAGD,KAAK,QAAQ,aAAe,KAAK,UAAaS,EAAcT,CAAC,EAC1Fe,EAAW,KAAKC,CAAS,CAC7B,CAGA,IAAMC,EAAU,KAAK,KAAQ,KAAK,QAAQ,aAAe,KAAK,UAAaP,EAE3E,MAAO,CAACK,EAAYE,CAAO,CAC/B,CAEA,OAAQ,CACJ,QAAQjB,EAAI,EAAGA,EAAI,KAAK,QAAQ,OAAQA,IAAK,CAErC,KAAK,QAAQ,SACb,KAAK,QAAQ,EAIjB,QAASM,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,GAAK,KAAK,UAAW,CAE3D,IAAMC,EAAgB,KAAK,SAAS,MAAMD,EAAGA,EAAI,KAAK,SAAS,EACzDE,EAAc,KAAK,OAAO,MAAMF,EAAGA,EAAI,KAAK,SAAS,EAErD,CAACS,EAAYE,CAAO,EAAI,KAAK,gBAAgBV,EAAeC,CAAW,EAEzE,OAAO,KAAK,QAAQ,gBAAmB,YACvC,KAAK,QAAQ,eAAeR,EAAG,KAAK,QAAQ,OAAQe,EAAYE,CAAO,EAG3E,KAAK,QAAUF,EACf,KAAK,KAAOE,CAChB,CACJ,CAEA,MAAO,CAAC,KAAK,QAAS,KAAK,IAAI,CACnC,CAKA,QAAQC,EAAoB,CAExB,GAAIA,EAAS,SAAW,KAAK,QAAQ,OACjC,MAAM,IAAI,MAAM,0DAA0D,EAK9E,IAAIC,EAAa,KAAK,KAEtB,QAASnB,EAAI,EAAGA,EAAIkB,EAAS,OAAQlB,IACjCmB,GAAcD,EAASlB,CAAC,EAAI,KAAK,QAAQA,CAAC,EAG9C,OAAOmB,CACX,CAgBA,UAAW,CACP,IAAIC,EAAuB,EACvBC,EAAoB,EAElBC,EAAqB,KAAK,OAAO,QAAU,EAAI,EACjD,KAAK,OAAO,OAAO,CAAChC,EAAKS,IAAMT,EAAMS,CAAC,EAAI,KAAK,OAAO,OAE1D,QAAS,EAAI,EAAG,EAAI,KAAK,SAAS,OAAQ,IAAK,CAC3C,IAAMa,EAAc,KAAK,OAAO,CAAC,EAC3BC,EAAiB,KAAK,QA
AQ,KAAK,SAAS,CAAC,CAAC,EAEpDO,GAAyBnB,EAAAW,EAAcC,EAAmB,GAC1DQ,GAAsBpB,EAAAW,EAAcU,EAAuB,EAC/D,CAEA,MAAO,GAAKF,EAAuBC,CACvC,CASA,kBAAmB,CACf,GAAG,KAAK,SAAS,QAAU,EAAG,MAAO,GAErC,IAAIE,EAAM,EAEV,QAASvB,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMY,EAAc,KAAK,OAAOZ,CAAC,EAC3Ba,EAAiB,KAAK,QAAQ,KAAK,SAASb,CAAC,CAAC,EAEpDuB,GAAQtB,EAAAW,EAAcC,EAAmB,EAC7C,CAEA,OAAAU,GAAO,KAAK,SAAS,OAEdA,CACX,CA+CJ", + "sourcesContent": ["export * from './core/linear-regression';", "import { ILinearRegressionOptions } from '../interfaces';\n\n/**\n * Linear Regression\n *\n * Mean Squared Error (MSE): Error function = Loss function\n * E = (1/n) * sum_from_0_to_n((actual_value - predicted_value)^2)\n * E = (1/n) * sum_from_0_to_n((actual_value - (mx + b))^2)\n * ---------------------------------------------------------\n * Goal: Minimize the error function - find (m, b) with the lowest possible E.\n * How:\n *\n * - Take partial derivative with respect m and also with respect b.\n * This helps to find the \"m\" that maximally increase E,\n * and \"b\" that maximally increase E (the steepest ascent).\n *\n * - After we found them, we get the opposite direction\n * to find the way to decrease E (the steepest descent).\n * ---------------------------------------------------------\n *\n * How to calculate partial derivative of \"m\"?\n * dE/dm = (1/n) * sum_from_0_to_n(2 * (actual_value - (mx + b)) * (-x))\n * dE/dm = (-2/n) * sum_from_0_to_n(x * (actual_value - (mx + b)))\n * ---------------------------------------------------------\n *\n * How to calculate partial derivative of \"b\"?\n * dE/db = (-2/n) * sum_from_0_to_n(actual_value - (mx + b))\n * ---------------------------------------------------------\n *\n * After the derivatives are found (the steepest ascent)\n * we need to find the steepest descent:\n *\n * new_m = current_m - learning_rate * dE/dm\n * new_b = current_b - learning_rate * dE/db\n *\n * General Form:\n * ------------\n * y = w1*x1 + w2*x2 + \u2026 + wn*xn + b\n * [w1, ..., wn] = weights, b = bias\n */\nexport class LinearRegression {\n\n options: ILinearRegressionOptions;\n weights: number[];\n bias: number;\n\n features: number[][];\n labels: number[];\n n: number;\n\n batchSize: number;\n\n constructor(options: ILinearRegressionOptions) {\n this.options = options;\n\n this.features = [...this.options.features];\n this.labels = [...this.options.labels];\n this.n = this.features.length > 0 ? this.features[0].length : 0;\n\n // Initialize weights to zero\n this.weights = LinearRegression.initZeroArray(this.n);\n this.weights.length = this.n;\n this.weights.fill(0);\n\n this.bias = 0;\n\n this.batchSize = this.options.batchSize ?? 
this.features.length;\n }\n\n private static initZeroArray(len: number) {\n const arr: number[] = [];\n arr.length = len;\n arr.fill(0);\n return arr;\n }\n\n private shuffle() {\n const indices: number[] = [];\n for(let i=0; i 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [indices[i], indices[j]] = [indices[j], indices[i]];\n }\n\n for (let i = this.features.length - 1; i > 0; i--) {\n [this.features[i], this.features[indices[i]]] = [this.features[indices[i]], this.features[i]];\n [this.labels[i], this.labels[indices[i]]] = [this.labels[indices[i]], this.labels[i]];\n }\n }\n\n private gradientDescent(batchFeatures: number[][], batchLabels: number[]) : [ number[], number ] {\n\n const mGradientSums = LinearRegression.initZeroArray(this.n);\n let bGradientSum = 0;\n\n for (let i = 0; i < batchFeatures.length; i++) {\n\n const _features: number[] = batchFeatures[i];\n\n const actualValue = batchLabels[i];\n const predictedValue = this.predict(_features);\n const diff = actualValue - predictedValue;\n\n // dE/dm = (-2/n) * sum_from_0_to_n(x * (actual_value - (mx + b)))\n for (let j = 0; j < this.n; j++) {\n mGradientSums[j] += -2 * _features[j] * diff;\n }\n\n // dE/db = (-2/n) * sum_from_0_to_n(actual_value - (mx + b))\n bGradientSum += -2 * diff;\n }\n\n // Update weights and bias using learning rate\n const newWeights = [];\n\n for(let i=0; i sum + x) / this.labels.length; // yMean\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n residualSumOfSquares += (actualValue - predictedValue) ** 2;\n totalSumOfSquares += (actualValue - meanOfActualValues) ** 2;\n }\n\n return 1 - (residualSumOfSquares / totalSumOfSquares);\n }\n\n /**\n * MSE = (1/n) * sum_from_0_to_n((actual_value - (mx + b))^2)\n * The ideal value of Mean Squared Error (MSE) is 0.\n * Achieving an MSE of 0 would mean that the model perfectly predicts the target variable\n * for every data point in the training set. However, it's important to note\n * that achieving an MSE of exactly 0 is extremely rare and often unrealistic, especially with real-world data.\n */\n meanSquaredError() {\n if(this.features.length <= 0) return 0;\n\n let mse = 0;\n\n for (let i = 0; i < this.features.length; i++) {\n const actualValue = this.labels[i];\n const predictedValue = this.predict(this.features[i]);\n\n mse += (actualValue - predictedValue) ** 2;\n }\n\n mse /= this.features.length;\n\n return mse;\n }\n\n /**\n * Compute the Pearson correlation coefficient.\n * --------------------------------------------\n * It is a statistical measure that quantifies the strength and direction of the linear relationship\n * between two variables. 
It's commonly used to assess the strength of association\n * between two continuous variables.\n *\n * Range [-1, 1]\n * r=1 indicates a perfect positive linear relationship,\n * meaning that as one variable increases, the other variable increases proportionally.\n *\n * r=\u22121 indicates a perfect negative linear relationship, meaning that as one variable increases,\n * the other variable decreases proportionally.\n *\n * r= 0 indicates no linear relationship between the variables.\n */\n pearson = () : number[] => {\n if (this.features.length <= 0 || this.labels.length <= 0) return [];\n\n const pearsonCoefficients: number[] = [];\n const yMean = this.labels.reduce((sum, y) => sum + y, 0) / this.labels.length;\n\n for (let featureIndex = 0; featureIndex < this.n; featureIndex++) {\n let sumXY = 0; // Sum of the product of (x - xMean) and (y - yMean)\n let sumX2 = 0; // Sum of squared differences between x and xMean\n let sumY2 = 0; // Sum of squared differences between y and yMean\n\n const xValues = this.features.map(feature => feature[featureIndex]);\n const xMean = xValues.reduce((sum, x) => sum + x, 0) / xValues.length;\n\n for (let i = 0; i < this.features.length; i++) {\n const x = this.features[i][featureIndex];\n const y = this.labels[i];\n\n sumXY += (x - xMean) * (y - yMean);\n sumX2 += (x - xMean) ** 2;\n sumY2 += (y - yMean) ** 2;\n }\n\n pearsonCoefficients.push((sumX2 === 0 || sumY2 === 0) ? 0 : (sumXY / Math.sqrt(sumX2 * sumY2)));\n }\n\n return pearsonCoefficients;\n }\n\n}"], + "mappings": ";;;;;;mjBAAA,IAAAA,EAAA,GAAAC,EAAAD,EAAA,sBAAAE,IAAA,eAAAC,EAAAH,GCwCO,IAAMI,EAAN,KAAuB,CAY1B,YAAYC,EAAmC,CAV/CC,EAAA,gBACAA,EAAA,gBACAA,EAAA,aAEAA,EAAA,iBACAA,EAAA,eACAA,EAAA,UAEAA,EAAA,kBAyMAA,EAAA,eAAU,IAAiB,CACvB,GAAI,KAAK,SAAS,QAAU,GAAK,KAAK,OAAO,QAAU,EAAG,MAAO,CAAC,EAElE,IAAMC,EAAgC,CAAC,EACjCC,EAAQ,KAAK,OAAO,OAAO,CAACC,EAAKC,IAAMD,EAAMC,EAAG,CAAC,EAAI,KAAK,OAAO,OAEvE,QAASC,EAAe,EAAGA,EAAe,KAAK,EAAGA,IAAgB,CAC9D,IAAIC,EAAQ,EACRC,EAAQ,EACRC,EAAQ,EAENC,EAAU,KAAK,SAAS,IAAIC,GAAWA,EAAQL,CAAY,CAAC,EAC5DM,EAAQF,EAAQ,OAAO,CAACN,EAAKS,IAAMT,EAAMS,EAAG,CAAC,EAAIH,EAAQ,OAE/D,QAASI,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMD,EAAI,KAAK,SAASC,CAAC,EAAER,CAAY,EACjCD,EAAI,KAAK,OAAOS,CAAC,EAEvBP,IAAUM,EAAID,IAAUP,EAAIF,GAC5BK,GAAUO,EAAAF,EAAID,EAAU,GACxBH,GAAUM,EAAAV,EAAIF,EAAU,EAC5B,CAEAD,EAAoB,KAAMM,IAAU,GAAKC,IAAU,EAAK,EAAKF,EAAQ,KAAK,KAAKC,EAAQC,CAAK,CAAE,CAClG,CAEA,OAAOP,CACX,GAtRJ,IAAAc,EAqDQ,KAAK,QAAUhB,EAEf,KAAK,SAAW,CAAC,GAAG,KAAK,QAAQ,QAAQ,EACzC,KAAK,OAAS,CAAC,GAAG,KAAK,QAAQ,MAAM,EACrC,KAAK,EAAI,KAAK,SAAS,OAAS,EAAI,KAAK,SAAS,CAAC,EAAE,OAAS,EAG9D,KAAK,QAAUD,EAAiB,cAAc,KAAK,CAAC,EACpD,KAAK,QAAQ,OAAS,KAAK,EAC3B,KAAK,QAAQ,KAAK,CAAC,EAEnB,KAAK,KAAO,EAEZ,KAAK,WAAYiB,EAAA,KAAK,QAAQ,YAAb,KAAAA,EAA0B,KAAK,SAAS,MAC7D,CAEA,OAAe,cAAcC,EAAa,CACtC,IAAMC,EAAgB,CAAC,EACvB,OAAAA,EAAI,OAASD,EACbC,EAAI,KAAK,CAAC,EACHA,CACX,CAEQ,SAAU,CACd,IAAMC,EAAoB,CAAC,EAC3B,QAAQL,EAAE,EAAGA,EAAE,KAAK,EAAGA,IACnBK,EAAQ,KAAKL,CAAC,EAGlB,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAAK,CAC/C,IAAMM,EAAI,KAAK,MAAM,KAAK,OAAO,GAAKN,EAAI,EAAE,EAC5C,CAACK,EAAQL,CAAC,EAAGK,EAAQC,CAAC,CAAC,EAAI,CAACD,EAAQC,CAAC,EAAGD,EAAQL,CAAC,CAAC,CACtD,CAEA,QAASA,EAAI,KAAK,SAAS,OAAS,EAAGA,EAAI,EAAGA,IAC1C,CAAC,KAAK,SAASA,CAAC,EAAG,KAAK,SAASK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,SAASK,EAAQL,CAAC,CAAC,EAAG,KAAK,SAASA,CAAC,CAAC,EAC5F,CAAC,KAAK,OAAOA,CAAC,EAAG,KAAK,OAAOK,EAAQL,CAAC,CAAC,CAAC,EAAI,CAAC,KAAK,OAAOK,EAAQL,CAAC,CAAC,EAAG,KAAK,OAAOA,CAAC,CAAC,CAE5F,CAEQ,gBAAgBO,EAA2BC,EAA8C,CAE7F,IAAMC,EAAgBxB,EAAiB,cAAc,KAAK,CAAC,EACvDyB,EAAe,EAE
nB,QAASV,EAAI,EAAGA,EAAIO,EAAc,OAAQP,IAAK,CAE3C,IAAMW,EAAsBJ,EAAcP,CAAC,EAErCY,EAAcJ,EAAYR,CAAC,EAC3Ba,EAAiB,KAAK,QAAQF,CAAS,EACvCG,EAAOF,EAAcC,EAG3B,QAASP,EAAI,EAAGA,EAAI,KAAK,EAAGA,IACxBG,EAAcH,CAAC,GAAK,GAAKK,EAAUL,CAAC,EAAIQ,EAI5CJ,GAAgB,GAAKI,CACzB,CAGA,IAAMC,EAAa,CAAC,EAEpB,QAAQf,EAAE,EAAGA,EAAE,KAAK,QAAQ,OAAQA,IAAK,CAIrC,IAAMgB,EAHU,KAAK,QAAQhB,CAAC,EAGD,KAAK,QAAQ,aAAe,KAAK,UAAaS,EAAcT,CAAC,EAC1Fe,EAAW,KAAKC,CAAS,CAC7B,CAGA,IAAMC,EAAU,KAAK,KAAQ,KAAK,QAAQ,aAAe,KAAK,UAAaP,EAE3E,MAAO,CAACK,EAAYE,CAAO,CAC/B,CAEA,KAAM,CACF,QAAQjB,EAAI,EAAGA,EAAI,KAAK,QAAQ,OAAQA,IAAK,CAErC,KAAK,QAAQ,SACb,KAAK,QAAQ,EAIjB,QAASM,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,GAAK,KAAK,UAAW,CAE3D,IAAMC,EAAgB,KAAK,SAAS,MAAMD,EAAGA,EAAI,KAAK,SAAS,EACzDE,EAAc,KAAK,OAAO,MAAMF,EAAGA,EAAI,KAAK,SAAS,EAErD,CAACS,EAAYE,CAAO,EAAI,KAAK,gBAAgBV,EAAeC,CAAW,EAEzE,OAAO,KAAK,QAAQ,gBAAmB,YACvC,KAAK,QAAQ,eAAeR,EAAG,KAAK,QAAQ,OAAQe,EAAYE,CAAO,EAG3E,KAAK,QAAUF,EACf,KAAK,KAAOE,CAChB,CACJ,CAEA,MAAO,CAAC,KAAK,QAAS,KAAK,IAAI,CACnC,CAKA,QAAQC,EAAoB,CAExB,GAAIA,EAAS,SAAW,KAAK,QAAQ,OACjC,MAAM,IAAI,MAAM,0DAA0D,EAK9E,IAAIC,EAAa,KAAK,KAEtB,QAASnB,EAAI,EAAGA,EAAIkB,EAAS,OAAQlB,IACjCmB,GAAcD,EAASlB,CAAC,EAAI,KAAK,QAAQA,CAAC,EAG9C,OAAOmB,CACX,CAgBA,UAAW,CACP,IAAIC,EAAuB,EACvBC,EAAoB,EAElBC,EAAqB,KAAK,OAAO,QAAU,EAAI,EACjD,KAAK,OAAO,OAAO,CAAChC,EAAKS,IAAMT,EAAMS,CAAC,EAAI,KAAK,OAAO,OAE1D,QAAS,EAAI,EAAG,EAAI,KAAK,SAAS,OAAQ,IAAK,CAC3C,IAAMa,EAAc,KAAK,OAAO,CAAC,EAC3BC,EAAiB,KAAK,QAAQ,KAAK,SAAS,CAAC,CAAC,EAEpDO,GAAyBnB,EAAAW,EAAcC,EAAmB,GAC1DQ,GAAsBpB,EAAAW,EAAcU,EAAuB,EAC/D,CAEA,MAAO,GAAKF,EAAuBC,CACvC,CASA,kBAAmB,CACf,GAAG,KAAK,SAAS,QAAU,EAAG,MAAO,GAErC,IAAIE,EAAM,EAEV,QAASvB,EAAI,EAAGA,EAAI,KAAK,SAAS,OAAQA,IAAK,CAC3C,IAAMY,EAAc,KAAK,OAAOZ,CAAC,EAC3Ba,EAAiB,KAAK,QAAQ,KAAK,SAASb,CAAC,CAAC,EAEpDuB,GAAQtB,EAAAW,EAAcC,EAAmB,EAC7C,CAEA,OAAAU,GAAO,KAAK,SAAS,OAEdA,CACX,CA+CJ", "names": ["index_esm_exports", "__export", "LinearRegression", "__toCommonJS", "LinearRegression", "options", "__publicField", "pearsonCoefficients", "yMean", "sum", "y", "featureIndex", "sumXY", "sumX2", "sumY2", "xValues", "feature", "xMean", "x", "i", "__pow", "_a", "len", "arr", "indices", "j", "batchFeatures", "batchLabels", "mGradientSums", "bGradientSum", "_features", "actualValue", "predictedValue", "diff", "newWeights", "gradientM", "newBias", "features", "prediction", "residualSumOfSquares", "totalSumOfSquares", "meanOfActualValues", "mse"] } diff --git a/docs/css/styles.1710770887921.css b/docs/css/styles.1710779087934.css similarity index 100% rename from docs/css/styles.1710770887921.css rename to docs/css/styles.1710779087934.css diff --git a/docs/index.html b/docs/index.html index 7ab3059..7e474e2 100644 --- a/docs/index.html +++ b/docs/index.html @@ -16,8 +16,8 @@ - - + + @@ -91,6 +91,6 @@

+ \ No newline at end of file diff --git a/docs/js/index.1710770887921.js b/docs/js/index.1710779087934.js similarity index 99% rename from docs/js/index.1710770887921.js rename to docs/js/index.1710779087934.js index 260cb23..9a9a362 100644 --- a/docs/js/index.1710770887921.js +++ b/docs/js/index.1710779087934.js @@ -1,3 +1,3 @@ (()=>{var at="side-menu",rt=()=>{let t=document.querySelector(".side-menu");if(!t)return;let e=window.location.pathname,a=t.querySelector(`a[href='${e}']`);a&&a.scrollIntoView({block:"center"})},It=()=>{let t=window.localStorage.getItem(at);if(!t)return[];let e=[];try{e=JSON.parse(t)||[]}catch(a){}return e},$t=()=>{let t=document.querySelectorAll(".side-menu [data-collapsible-title]"),e=[];for(let a of t){let r=a.getAttribute("data-id")||"";if(!r)continue;let n=a.getAttribute("data-opened")==="true";e.push({id:r,opened:n})}window.localStorage.setItem(at,JSON.stringify(e))},Ht=()=>{let t=It();if(Array.isArray(t))for(let e of t){let a=document.querySelector(`.side-menu [data-id="${e.id}"]`);a&&nt(a,e.opened,!1)}},nt=(t,e,a)=>{var n;t.setAttribute("data-opened",e.toString());let r=t.querySelector("[data-arrow]");r&&(r.classList.toggle("rotate-90",e),(n=t.nextElementSibling)==null||n.classList.toggle("hidden",!e),a&&$t())},it=()=>{let t=document.querySelectorAll(".side-menu [data-collapsible-title]");for(let e of t)e.addEventListener("click",()=>{let a=e.getAttribute("data-opened")==="true";nt(e,!a,!0)});Ht()};var ot=()=>{let t=document.getElementById("mobile-menu-btn");if(!t)return;t.addEventListener("click",r=>{r.stopPropagation(),document.body.classList.toggle("mobile-menu-opened")}),document.body.addEventListener("click",()=>{document.body.classList.remove("mobile-menu-opened")});let e=document.getElementById("side-menu");if(!e)return;e.addEventListener("click",r=>{r.stopPropagation()});let a=document.getElementById("mobile-menu-close-btn");a&&a.addEventListener("click",()=>{document.body.classList.remove("mobile-menu-opened")})};var W="mode",st=()=>{let t=window.localStorage.getItem(W)||"light";document.documentElement.classList.toggle("dark",t==="dark");let e=document.getElementById("move-to-dark-mode-btn"),a=document.getElementById("move-to-light-mode-btn"),r=()=>{document.documentElement.classList.add("dark"),e.classList.add("hidden"),a.classList.remove("hidden"),window.localStorage.setItem(W,"dark")},n=()=>{document.documentElement.classList.remove("dark"),a.classList.add("hidden"),e.classList.remove("hidden"),window.localStorage.setItem(W,"light")};t==="dark"?r():n(),e==null||e.addEventListener("click",r),a==null||a.addEventListener("click",n)};var Nt=Object.defineProperty,lt=Object.getOwnPropertySymbols,Dt=Object.prototype.hasOwnProperty,Ft=Object.prototype.propertyIsEnumerable,ct=(t,e,a)=>e in t?Nt(t,e,{enumerable:!0,configurable:!0,writable:!0,value:a}):t[e]=a,q=(t,e)=>{for(var a in e||(e={}))Dt.call(e,a)&&ct(t,a,e[a]);if(lt)for(var a of lt(e))Ft.call(e,a)&&ct(t,a,e[a]);return t},qt=(t,e)=>{for(let a of e){let r=a[1];if(r===void 0)continue;let n=a[0];t.setAttribute(n,r.toString())}},wt=(t,e)=>{for(let a of t){let r=a[1];if(r===void 0)continue;let n=a[0];e[n]=r}},jt=t=>{let e=document.createElement("canvas");qt(e,[["id",t.id],["class",t.classes],["style",t.style],["title",t.title],["tabindex",t.tabindex],["role",t.role],["aria-label",t.ariaLabel]]),t.fallback&&(e.textContent=t.fallback);let a=typeof e.getContext=="function"?e.getContext("2d",t.contextAttributes):null;return 
e.width=t.width,e.height=t.height,{ctx:a,$canvas:e}},D=(t,e)=>{wt([["lineWidth",t.lineWidth],["strokeStyle",t.strokeStyle],["lineCap",t.lineCap],["lineJoin",t.lineJoin],["miterLimit",t.miterLimit],["lineDashOffset",t.lineDashOffset]],e),t.lineDashSegments&&e.setLineDash(t.lineDashSegments)},J=(t,e)=>{t.fillStyle!==void 0&&(e.fillStyle=t.fillStyle),wt([["shadowOffsetX",t.shadowOffsetX],["shadowOffsetY",t.shadowOffsetY],["shadowBlur",t.shadowBlur],["shadowColor",t.shadowColor]],e)},Bt=(t,e)=>{let{x1:a,y1:r,x2:n,y2:i}=t;e.save(),e.beginPath(),e.moveTo(a,r),e.lineTo(n,i),t.strokeStyle&&(D(t,e),e.stroke()),e.restore()},Ut=(t,e)=>{let{x:a,y:r,w:n,h:i}=t;if(t.clear){e.clearRect(a,r,n,i);return}if(t.radii){e.save(),J(t,e),D(t,e),e.beginPath(),e.roundRect(a,r,n,i,t.radii),t.fillStyle&&e.fill(),t.strokeStyle&&e.stroke(),e.restore();return}e.save(),t.fillStyle&&(J(t,e),e.fillRect(a,r,n,i)),t.strokeStyle&&(D(t,e),e.strokeRect(a,r,n,i)),e.restore()},Vt=(t,e)=>{let{cx:a,cy:r,r:n}=t,i=t.startAngleRad===void 0?0:t.startAngleRad,o=t.endAngleRad===void 0?2*Math.PI:t.endAngleRad;e.save(),e.beginPath(),J(t,e),D(t,e),e.arc(a,r,n,i,o,t.counterclockwise),t.fillStyle&&e.fill(),t.strokeStyle&&(D(t,e),e.stroke()),e.restore()},Wt=Math.pow,et=(t,e=1/0)=>{if(e===1/0)return t;e<0&&(e=0);let a=Wt(10,e);return Math.round(t*a)/a},Qt=(t,e,a=1/0)=>{let r=[];for(let n=0;n{let a=0;for(let r=0;r{let r=Qt(t,e);return Zt(r,a)},$=(t,e,a=1/0)=>et(Math.random()*(e-t)+t,a),Yt=(t,e)=>Math.floor(Math.random()*(e-t+1)+t),Gt=()=>Math.random()<.5,F=t=>{let e=Yt(0,t.length-1);return t[e]},Xt=()=>{let t=Kt();return te(t)},Kt=()=>{let t=$(1,360),e=$(0,100),a=$(0,100);return[t,e,a]},te=t=>{if(t[0]>360||t[1]>100||t[2]>100)return"#ffffff";if(t[0]<0||t[1]<0||t[2]<0)return"#000000";let e=t[0]/360,a=t[1]/100,r=t[2]/100,n,i,o;if(a===0)n=i=o=r;else{let l=(h,d,f)=>(f<0&&(f+=1),f>1&&(f-=1),f<.16666666666666666?h+(d-h)*6*f:f<.5?d:f<.6666666666666666?h+(d-h)*(.6666666666666666-f)*6:h),c=r<.5?r*(1+a):r+a-r*a,u=2*r-c;n=l(u,c,e+1/3),i=l(u,c,e),o=l(u,c,e-1/3)}let s=l=>{let c=Math.round(l*255).toString(16);return c.length===1?"0"+c:c};return`#${s(n)}${s(i)}${s(o)}`},ee=t=>{let e=t.duration!==void 0?t.duration:1/0,a,r,n,i,o=!1,s,l=()=>{a=void 0,n=void 0,i=void 0,o=!1,r!==void 0&&window.cancelAnimationFrame(r)},c=()=>{l(),v()},u=()=>{o=!1},h=()=>{o=!0},d=M=>{a===void 0&&(a=M),n=M-a,o&&i!==M&&typeof t.callback=="function"&&t.callback(y()),n<=e?(i=M,r=window.requestAnimationFrame(d)):l()},f=(M,g)=>{c(),typeof t.resizeCallback=="function"&&t.resizeCallback(M,g)},v=()=>{a=void 0,n=void 0,i=void 0,o=!0,t.restartOnResize&&window.ResizeObserver&&s===void 0?(s=new ResizeObserver(f),s.observe(document.body,{box:"border-box"})):r=window.requestAnimationFrame(d)},x=()=>n,A=()=>o,b=()=>a,p=()=>{if(!(e===1/0||n===void 0))return n*100/e},k=()=>s,y=()=>({start:v,stop:l,pause:u,resume:h,restart:c,isAnimating:A,getElapsedTime:x,getStartTime:b,getPercent:p,getResizeObserver:k});return y()},Y=(t,e,a,r)=>`rgba(${t}, ${e}, ${a}, ${r} )`,_t=640,At=768,Ct=1024,Lt=()=>{let t=Math.max(document.documentElement.clientWidth||0,window.innerWidth||0),e=Math.max(document.documentElement.clientHeight||0,window.innerHeight||0);return[t,e]},ae=(t,e)=>{let a=Lt()[0];return t.smConnectionSize!==void 0&&a<=_t?t.smConnectionSize*e:t.mdConnectionSize!==void 0&&a<=At?t.mdConnectionSize*e:t.lgConnectionSize!==void 0&&a<=Ct?t.lgConnectionSize*e:t.connectionSize*e},ut=t=>{let e=Lt()[0];return t.smParticlesNumber!==void 0&&e<=_t?t.smParticlesNumber:t.mdParticlesNumber!==void 
0&&e<=At?t.mdParticlesNumber:t.lgParticlesNumber!==void 0&&e<=Ct?t.lgParticlesNumber:t.particlesNumber},re=(t,e)=>{var a;let{particles:r,ctx:n,connectionRgbColor:i}=e,o=(a=t.$placeholder)==null?void 0:a.getBoundingClientRect(),s=(o==null?void 0:o.width)||0,l=ae(t,s);for(let c=0;c{if(e===1/0)return t;e<0&&(e=0);let a=ne(10,e);return Math.round(t*a)/a},ie=(t,e=1/0)=>{let a=t*(Math.PI/180);return C(a,e)},G=t=>!isNaN(parseFloat(t))&&isFinite(t),X=(t,e=1/0)=>{let a=t[0],r=t[1],n=t[2]-r;return a===0&&n===0?1/0:a===0?NaN:C(n/a,e)},ht=(t,e=1/0)=>{let a=t[0],r=t[1],n=t[2],i=t[3];if(a===0){let u=X([r,n,i],e);return G(u)?[u]:[]}let o=n-i,s=r*r-4*a*o;if(s<0)return[];if(s===0)return[C(-r/(2*a),e)];let l=2*a,c=Math.sqrt(s);return[C((-r+c)/l,e),C((-r-c)/l,e)]},oe=(t,e,a,r,n=1/0)=>{let i=Math.pow(1-t,2),o=(1-t)*2*t,s=t*t;return[C(i*e[0]+o*a[0]+s*r[0],n),C(i*e[1]+o*a[1]+s*r[1],n)]},se=(t,e,a,r,n,i=1/0)=>{let o=Math.pow(1-t,3),s=Math.pow(1-t,2)*3*t,l=(1-t)*3*t*t,c=t*t*t;return[C(o*e[0]+s*a[0]+l*r[0]+c*n[0],i),C(o*e[1]+s*a[1]+l*r[1]+c*n[1],i)]},le=(t,e,a,r=1/0)=>{let n=2*t[0]-4*e[0]+2*a[0],i=-2*t[0]+2*e[0],o=X([n,i,0],r),s=2*t[1]-4*e[1]+2*a[1],l=-2*t[1]+2*e[1],c=X([s,l,0],r),u=[];return G(o)&&u.push(o),G(c)&&u.push(c),u},ce=(t,e,a,r,n=1/0)=>{let i=-3*t[0]+9*e[0]-9*a[0]+3*r[0],o=6*t[0]-12*e[0]+6*a[0],s=-3*t[0]+3*e[0],l=[i,o,s,0],c=-3*t[1]+9*e[1]-9*a[1]+3*r[1],u=6*t[1]-12*e[1]+6*a[1],h=-3*t[1]+3*e[1],d=[c,u,h,0],f=ht(l,n).filter(x=>x>=0&&x<=1),v=ht(d,n).filter(x=>x>=0&&x<=1);return[...f,...v].length===2?[...f,...v]:null},ue=(t,e,a,r=1/0)=>{let n=le(t,e,a),i=1/0,o=1/0,s=-1/0,l=-1/0;for(let c of n){let u=oe(c,t,e,a),h=u[0],d=u[1];i=Math.min(i,h),s=Math.max(s,h),o=Math.min(o,d),l=Math.max(l,d)}return i=C(Math.min(i,t[0],a[0]),r),s=C(Math.max(s,t[0],a[0]),r),o=C(Math.min(o,t[1],a[1]),r),l=C(Math.max(l,t[1],a[1]),r),{x:i,y:o,w:Math.abs(s-i),h:Math.abs(l-o),x2:s,y2:l}},he=(t,e,a,r,n=1/0)=>{let i=ce(t,e,a,r)||[],o=1/0,s=1/0,l=-1/0,c=-1/0;for(let u of i){let h=se(u,t,e,a,r),d=h[0],f=h[1];o=Math.min(o,d!=null?d:1/0),l=Math.max(l,d!=null?d:-1/0),s=Math.min(s,f!=null?f:1/0),c=Math.max(c,f!=null?f:-1/0)}return o=C(Math.min(o,t[0],r[0]),n),l=C(Math.max(l,t[0],r[0]),n),s=C(Math.min(s,t[1],r[1]),n),c=C(Math.max(c,t[1],r[1]),n),{x:o,y:s,w:Math.abs(l-o),h:Math.abs(c-s),x2:l,y2:c}},ft=new RegExp("^[+-]?(?=\\.\\d|\\d)(?:0|[1-9]\\d*)?(?:\\.\\d+)?(?:(?<=\\d)(?:[eE][+-]?\\d+))?"),fe=t=>{let e={tokens:[],errors:[]};if(!t||t.trim()==="")return e;let a=0,r=0,n=0,i=()=>a>=t.length,o=h=>{e.tokens.push({tokenType:h,line:r,col:n})},s=h=>{e.tokens.push({tokenType:"num",value:h,line:r,col:n})},l=h=>{e.errors.push({line:r,col:n,msg:h})},c=()=>i()?!1:ft.test(t.substring(a)),u=()=>{let h=t[a];if(h.charAt(0)===` `||h.charAt(0)==="\r"){a++,n=0,r++;return}if(/\s/.test(h)||h===","){a++,n++;return}if(c()){let d=t.substring(a).match(ft);if(d&&d.length>0){let f=d[0];s(f),a+=f.length,n+=f.length;return}}switch(h){case"M":o("M");break;case"m":o("m");break;case"Z":o("Z");break;case"z":o("z");break;case"L":o("L");break;case"l":o("l");break;case"H":o("H");break;case"h":o("h");break;case"V":o("V");break;case"v":o("v");break;case"C":o("C");break;case"c":o("c");break;case"S":o("S");break;case"s":o("s");break;case"Q":o("Q");break;case"q":o("q");break;case"T":o("T");break;case"t":o("t");break;case"A":o("A");break;case"a":o("a");break;default:{l(`Unexpected character ${h}`);break}}a++,n++};for(;!i();)u();return e},me=t=>{let e={commands:[],errors:t.errors||[]};if(t.errors.length>0||t.tokens.length===0)return 
e;let{tokens:a,errors:r}=t,n=(u,h)=>{r.push({line:u==null?void 0:u.line,col:u==null?void 0:u.col,msg:h})};if(a[0].tokenType!=="M"&&a[0].tokenType!=="m")return n(a[0],"A path data segment must begin with a 'moveto' command 'M' or 'm'."),e;let i=0,o=()=>i>=a.length,s=u=>{var h,d;if(!u||u.toLowerCase()!=="a")return!0;let f=(((h=a[i+4])==null?void 0:h.value)||"").toString(),v=(((d=a[i+5])==null?void 0:d.value)||"").toString();return(f==="0"||f==="1")&&(v==="0"||v==="1")},l=(u,h,d)=>{var f;let v=a[i].tokenType,x=[];if(u>0)for(let p=1;p<=u;p++){if(!a[i+p]||a[i+p].tokenType!=="num"){n(a[i],`Expected number(s) after command ${v}.`),i+=u;return}x.push(Number(a[i+p].value))}if(!s(v)){n(a[i],"Arc flags must be 0 or 1."),i+=u+1;return}if(!s(v)){n(a[i],"Arc flags must be 0 or 1."),i+=u+1;return}if(e.commands.push({command:a[i].tokenType,params:x}),i+=u+1,u<=0)return;let A=[];for(;((f=a[i])==null?void 0:f.tokenType)==="num";)A.push(a[i]),i++;if(A.length%u!==0){n(A[A.length-1],"Expected a number.");return}let b=d?h.toLowerCase():h.toUpperCase();for(let p=0;p{let u=a[i],h=u.tokenType.toLowerCase()===u.tokenType;switch(u.tokenType){case"M":case"m":case"L":case"l":{l(2,"L",h);break}case"Z":case"z":{l(0,"L",h);break}case"H":case"h":case"V":case"v":{l(1,u.tokenType,h);break}case"C":case"c":{l(6,u.tokenType,h);break}case"S":case"s":case"Q":case"q":{l(4,u.tokenType,h);break}case"T":case"t":{l(2,u.tokenType,h);break}case"A":case"a":{l(7,u.tokenType,h);break}default:{n(a[i],"Wrong path command."),i++;break}}};for(l(2,"L",a[0].tokenType==="m");!o();)c();return e},de=t=>{let{commands:e}=t;if(e.length<=0)return t;let a=e[0].params[0],r=e[0].params[1],n=a,i=r;e[0].command="M";for(let o=1;o{let{commands:e}=t;if(e.length<=0)return t;e[0].command="M";for(let a=1;a{let e=fe(t);return me(e)},R=(t,e)=>{let a=2*Math.PI,r=e>0?1:-1;return(a+r*Math.acos(t/Math.sqrt(t*t+e*e)))%a},Q=(t,e,a,r)=>({x:t,y:a,w:Math.abs(r-a),h:Math.abs(e-t),x2:e,y2:r}),pe=(t,e,a,r,n,i,o,s,l)=>{let c,u,h,d;if(a<0&&(a*=-1),r<0&&(r*=-1),a===0||r===0)return c=ts?t:s,h=el?e:l,Q(c,u,h,d);let f=Math.cos(n)*(t-s)/2+Math.sin(n)*(e-l)/2,v=-Math.sin(n)*(t-s)/2+Math.cos(n)*(e-l)/2,x=a*a*r*r-a*a*v*v-r*r*f*f;x/=a*a*v*v+r*r*f*f;let A=0,b=0;if(x<0){let O=a/r;if(x=v*v+f*f/(O*O),x<0)return c=ts?t:s,h=el?e:l,Q(c,u,h,d);r=Math.sqrt(x),a=O*r}else{let O=(i==o?-1:1)*Math.sqrt(x);A=O*a*v/r,b=-O*r*f/a}let p=A*Math.cos(n)-b*Math.sin(n)+(t+s)/2,k=A*Math.sin(n)+b*Math.cos(n)+(e+l)/2,y,M,g,S;if(n===0||n===Math.PI)c=p-a,y=R(-a,0),u=p+a,M=R(a,0),h=k-r,g=R(0,-r),d=k+r,S=R(0,r);else if(n===Math.PI/2||n===3*Math.PI/2)c=p-r,y=R(-r,0),u=p+r,M=R(r,0),h=k-a,g=R(0,-a),d=k+a,S=R(0,a);else{y=-Math.atan(r*Math.tan(n)/a),M=Math.PI-Math.atan(r*Math.tan(n)/a),c=p+a*Math.cos(M)*Math.cos(n)-r*Math.sin(y)*Math.sin(n),u=p+a*Math.cos(M)*Math.cos(n)-r*Math.sin(M)*Math.sin(n),c>u&&([c,u]=[u,c],[y,M]=[M,y]);let O=k+a*Math.cos(y)*Math.sin(n)+r*Math.sin(y)*Math.cos(n);y=R(c-p,O-k),O=k+a*Math.cos(M)*Math.sin(n)+r*Math.sin(M)*Math.cos(n),M=R(u-p,O-k),g=Math.atan(r/(Math.tan(n)*a)),S=Math.atan(r/(Math.tan(n)*a))+Math.PI,h=k+a*Math.cos(g)*Math.sin(n)+r*Math.sin(g)*Math.cos(n),d=k+a*Math.cos(S)*Math.sin(n)+r*Math.sin(S)*Math.cos(n),h>d&&([h,d]=[d,h],[g,S]=[S,g]);let V=p+a*Math.cos(g)*Math.cos(n)-r*Math.sin(g)*Math.sin(n);g=R(V-p,h-k),V=p+a*Math.cos(S)*Math.cos(n)-r*Math.sin(S)*Math.sin(n),S=R(V-p,d-k)}let L=R(t-p,e-k),w=R(s-p,l-k);o||([L,w]=[w,L]);let P=!1;return L>w&&([L,w]=[w,L],P=!0),(!P&&(L>y||wy||wM||wM||wg||wg||wS||wS||w{var a,r,n,i;if(!t||t.trim()==="")return null;let o=ge(t);if(o.errors.length>0)return 
null;let s=de(o);if(!s||s.commands.length<=0)return null;let l=1/0,c=1/0,u=-1/0,h=-1/0,d=be(s),f=d.commands[0].params[0],v=d.commands[0].params[1],x=f,A=v;for(let b of d.commands)switch(b.command){case"M":{l=Math.min(l,b.params[0]),c=Math.min(c,b.params[1]),u=Math.max(u,b.params[0]),h=Math.max(h,b.params[1]),x=b.params[0],A=b.params[1];break}case"Z":{f=x,v=A;break}case"L":{l=Math.min(l,b.params[0]),c=Math.min(c,b.params[1]),u=Math.max(u,b.params[0]),h=Math.max(h,b.params[1]),f=b.params[0],v=b.params[1];break}case"C":{let p=[f,v],k=[b.params[0],b.params[1]],y=[b.params[2],b.params[3]],M=[b.params[4],b.params[5]],g=he(p,k,y,M);l=Math.min(l,g.x),c=Math.min(c,g.y),u=Math.max(u,g.x2),h=Math.max(h,g.y2),f=b.params[4],v=b.params[5];break}case"Q":{let p=[f,v],k=[b.params[0],b.params[1]],y=[b.params[2],b.params[3]],M=ue(p,k,y);l=Math.min(l,M.x),c=Math.min(c,M.y),u=Math.max(u,M.x2),h=Math.max(h,M.y2),f=b.params[2],v=b.params[3];break}case"A":{let p=b.params[0],k=b.params[1],y=b.params[2],M=b.params[3],g=b.params[4],S=b.params[5],L=b.params[6],w=pe(f,v,p,k,ie(y),M===1,g===1,S,L);l=Math.min(l,(a=w==null?void 0:w.x)!=null?a:0),c=Math.min(c,(r=w==null?void 0:w.y)!=null?r:0),u=Math.max(u,(n=w==null?void 0:w.x2)!=null?n:0),h=Math.max(h,(i=w==null?void 0:w.y2)!=null?i:0),f=b.params[5],v=b.params[6];break}}return{x:C(l,e),y:C(c,e),w:C(Math.abs(u-l),e),h:C(Math.abs(h-c),e),x2:C(u,e),y2:C(h,e)}};function j(t){return j=typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?function(e){return typeof e}:function(e){return e&&typeof Symbol=="function"&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},j(t)}var Me=/^\s+/,ye=/\s+$/;function m(t,e){if(t=t||"",e=e||{},t instanceof m)return t;if(!(this instanceof m))return new m(t,e);var a=ke(t);this._originalInput=t,this._r=a.r,this._g=a.g,this._b=a.b,this._a=a.a,this._roundA=Math.round(100*this._a)/100,this._format=e.format||a.format,this._gradientType=e.gradientType,this._r<1&&(this._r=Math.round(this._r)),this._g<1&&(this._g=Math.round(this._g)),this._b<1&&(this._b=Math.round(this._b)),this._ok=a.ok}m.prototype={isDark:function(){return this.getBrightness()<128},isLight:function(){return!this.isDark()},isValid:function(){return this._ok},getOriginalInput:function(){return this._originalInput},getFormat:function(){return this._format},getAlpha:function(){return this._a},getBrightness:function(){var t=this.toRgb();return(t.r*299+t.g*587+t.b*114)/1e3},getLuminance:function(){var t=this.toRgb(),e,a,r,n,i,o;return e=t.r/255,a=t.g/255,r=t.b/255,e<=.03928?n=e/12.92:n=Math.pow((e+.055)/1.055,2.4),a<=.03928?i=a/12.92:i=Math.pow((a+.055)/1.055,2.4),r<=.03928?o=r/12.92:o=Math.pow((r+.055)/1.055,2.4),.2126*n+.7152*i+.0722*o},setAlpha:function(t){return this._a=Rt(t),this._roundA=Math.round(100*this._a)/100,this},toHsv:function(){var t=dt(this._r,this._g,this._b);return{h:t.h*360,s:t.s,v:t.v,a:this._a}},toHsvString:function(){var t=dt(this._r,this._g,this._b),e=Math.round(t.h*360),a=Math.round(t.s*100),r=Math.round(t.v*100);return this._a==1?"hsv("+e+", "+a+"%, "+r+"%)":"hsva("+e+", "+a+"%, "+r+"%, "+this._roundA+")"},toHsl:function(){var t=mt(this._r,this._g,this._b);return{h:t.h*360,s:t.s,l:t.l,a:this._a}},toHslString:function(){var t=mt(this._r,this._g,this._b),e=Math.round(t.h*360),a=Math.round(t.s*100),r=Math.round(t.l*100);return this._a==1?"hsl("+e+", "+a+"%, "+r+"%)":"hsla("+e+", "+a+"%, "+r+"%, "+this._roundA+")"},toHex:function(t){return bt(this._r,this._g,this._b,t)},toHexString:function(t){return"#"+this.toHex(t)},toHex8:function(t){return 
_e(this._r,this._g,this._b,this._a,t)},toHex8String:function(t){return"#"+this.toHex8(t)},toRgb:function(){return{r:Math.round(this._r),g:Math.round(this._g),b:Math.round(this._b),a:this._a}},toRgbString:function(){return this._a==1?"rgb("+Math.round(this._r)+", "+Math.round(this._g)+", "+Math.round(this._b)+")":"rgba("+Math.round(this._r)+", "+Math.round(this._g)+", "+Math.round(this._b)+", "+this._roundA+")"},toPercentageRgb:function(){return{r:Math.round(_(this._r,255)*100)+"%",g:Math.round(_(this._g,255)*100)+"%",b:Math.round(_(this._b,255)*100)+"%",a:this._a}},toPercentageRgbString:function(){return this._a==1?"rgb("+Math.round(_(this._r,255)*100)+"%, "+Math.round(_(this._g,255)*100)+"%, "+Math.round(_(this._b,255)*100)+"%)":"rgba("+Math.round(_(this._r,255)*100)+"%, "+Math.round(_(this._g,255)*100)+"%, "+Math.round(_(this._b,255)*100)+"%, "+this._roundA+")"},toName:function(){return this._a===0?"transparent":this._a<1?!1:He[bt(this._r,this._g,this._b,!0)]||!1},toFilter:function(t){var e="#"+gt(this._r,this._g,this._b,this._a),a=e,r=this._gradientType?"GradientType = 1, ":"";if(t){var n=m(t);a="#"+gt(n._r,n._g,n._b,n._a)}return"progid:DXImageTransform.Microsoft.gradient("+r+"startColorstr="+e+",endColorstr="+a+")"},toString:function(t){var e=!!t;t=t||this._format;var a=!1,r=this._a<1&&this._a>=0,n=!e&&r&&(t==="hex"||t==="hex6"||t==="hex3"||t==="hex4"||t==="hex8"||t==="name");return n?t==="name"&&this._a===0?this.toName():this.toRgbString():(t==="rgb"&&(a=this.toRgbString()),t==="prgb"&&(a=this.toPercentageRgbString()),(t==="hex"||t==="hex6")&&(a=this.toHexString()),t==="hex3"&&(a=this.toHexString(!0)),t==="hex4"&&(a=this.toHex8String(!0)),t==="hex8"&&(a=this.toHex8String()),t==="name"&&(a=this.toName()),t==="hsl"&&(a=this.toHslString()),t==="hsv"&&(a=this.toHsvString()),a||this.toHexString())},clone:function(){return m(this.toString())},_applyModification:function(t,e){var a=t.apply(null,[this].concat([].slice.call(e)));return this._r=a._r,this._g=a._g,this._b=a._b,this.setAlpha(a._a),this},lighten:function(){return this._applyModification(Re,arguments)},brighten:function(){return this._applyModification(Ee,arguments)},darken:function(){return this._applyModification(Te,arguments)},desaturate:function(){return this._applyModification(Ae,arguments)},saturate:function(){return this._applyModification(Ce,arguments)},greyscale:function(){return this._applyModification(Le,arguments)},spin:function(){return this._applyModification(ze,arguments)},_applyCombination:function(t,e){return t.apply(null,[this].concat([].slice.call(e)))},analogous:function(){return this._applyCombination(Ie,arguments)},complement:function(){return this._applyCombination(Oe,arguments)},monochromatic:function(){return this._applyCombination($e,arguments)},splitcomplement:function(){return this._applyCombination(Pe,arguments)},triad:function(){return this._applyCombination(pt,[3])},tetrad:function(){return this._applyCombination(pt,[4])}};m.fromRatio=function(t,e){if(j(t)=="object"){var a={};for(var r in t)t.hasOwnProperty(r)&&(r==="a"?a[r]=t[r]:a[r]=N(t[r]));t=a}return m(t,e)};function ke(t){var e={r:0,g:0,b:0},a=1,r=null,n=null,i=null,o=!1,s=!1;return typeof 
t=="string"&&(t=qe(t)),j(t)=="object"&&(I(t.r)&&I(t.g)&&I(t.b)?(e=xe(t.r,t.g,t.b),o=!0,s=String(t.r).substr(-1)==="%"?"prgb":"rgb"):I(t.h)&&I(t.s)&&I(t.v)?(r=N(t.s),n=N(t.v),e=we(t.h,r,n),o=!0,s="hsv"):I(t.h)&&I(t.s)&&I(t.l)&&(r=N(t.s),i=N(t.l),e=Se(t.h,r,i),o=!0,s="hsl"),t.hasOwnProperty("a")&&(a=t.a)),a=Rt(a),{ok:o,format:t.format||s,r:Math.min(255,Math.max(e.r,0)),g:Math.min(255,Math.max(e.g,0)),b:Math.min(255,Math.max(e.b,0)),a}}function xe(t,e,a){return{r:_(t,255)*255,g:_(e,255)*255,b:_(a,255)*255}}function mt(t,e,a){t=_(t,255),e=_(e,255),a=_(a,255);var r=Math.max(t,e,a),n=Math.min(t,e,a),i,o,s=(r+n)/2;if(r==n)i=o=0;else{var l=r-n;switch(o=s>.5?l/(2-r-n):l/(r+n),r){case t:i=(e-a)/l+(e1&&(h-=1),h<1/6?c+(u-c)*6*h:h<1/2?u:h<2/3?c+(u-c)*(2/3-h)*6:c}if(e===0)r=n=i=a;else{var s=a<.5?a*(1+e):a+e-a*e,l=2*a-s;r=o(l,s,t+1/3),n=o(l,s,t),i=o(l,s,t-1/3)}return{r:r*255,g:n*255,b:i*255}}function dt(t,e,a){t=_(t,255),e=_(e,255),a=_(a,255);var r=Math.max(t,e,a),n=Math.min(t,e,a),i,o,s=r,l=r-n;if(o=r===0?0:l/r,r==n)i=0;else{switch(r){case t:i=(e-a)/l+(e>1)+720)%360;--e;)r.h=(r.h+n)%360,i.push(m(r));return i}function $e(t,e){e=e||6;for(var a=m(t).toHsv(),r=a.h,n=a.s,i=a.v,o=[],s=1/e;e--;)o.push(m({h:r,s:n,v:i})),i=(i+s)%1;return o}m.mix=function(t,e,a){a=a===0?0:a||50;var r=m(t).toRgb(),n=m(e).toRgb(),i=a/100,o={r:(n.r-r.r)*i+r.r,g:(n.g-r.g)*i+r.g,b:(n.b-r.b)*i+r.b,a:(n.a-r.a)*i+r.a};return m(o)};m.readability=function(t,e){var a=m(t),r=m(e);return(Math.max(a.getLuminance(),r.getLuminance())+.05)/(Math.min(a.getLuminance(),r.getLuminance())+.05)};m.isReadable=function(t,e,a){var r=m.readability(t,e),n,i;switch(i=!1,n=je(a),n.level+n.size){case"AAsmall":case"AAAlarge":i=r>=4.5;break;case"AAlarge":i=r>=3;break;case"AAAsmall":i=r>=7;break}return i};m.mostReadable=function(t,e,a){var r=null,n=0,i,o,s,l;a=a||{},o=a.includeFallbackColors,s=a.level,l=a.size;for(var c=0;cn&&(n=i,r=m(e[c]));return m.isReadable(t,r,{level:s,size:l})||!o?r:(a.includeFallbackColors=!1,m.mostReadable(t,["#fff","#000"],a))};var 
K=m.names={aliceblue:"f0f8ff",antiquewhite:"faebd7",aqua:"0ff",aquamarine:"7fffd4",azure:"f0ffff",beige:"f5f5dc",bisque:"ffe4c4",black:"000",blanchedalmond:"ffebcd",blue:"00f",blueviolet:"8a2be2",brown:"a52a2a",burlywood:"deb887",burntsienna:"ea7e5d",cadetblue:"5f9ea0",chartreuse:"7fff00",chocolate:"d2691e",coral:"ff7f50",cornflowerblue:"6495ed",cornsilk:"fff8dc",crimson:"dc143c",cyan:"0ff",darkblue:"00008b",darkcyan:"008b8b",darkgoldenrod:"b8860b",darkgray:"a9a9a9",darkgreen:"006400",darkgrey:"a9a9a9",darkkhaki:"bdb76b",darkmagenta:"8b008b",darkolivegreen:"556b2f",darkorange:"ff8c00",darkorchid:"9932cc",darkred:"8b0000",darksalmon:"e9967a",darkseagreen:"8fbc8f",darkslateblue:"483d8b",darkslategray:"2f4f4f",darkslategrey:"2f4f4f",darkturquoise:"00ced1",darkviolet:"9400d3",deeppink:"ff1493",deepskyblue:"00bfff",dimgray:"696969",dimgrey:"696969",dodgerblue:"1e90ff",firebrick:"b22222",floralwhite:"fffaf0",forestgreen:"228b22",fuchsia:"f0f",gainsboro:"dcdcdc",ghostwhite:"f8f8ff",gold:"ffd700",goldenrod:"daa520",gray:"808080",green:"008000",greenyellow:"adff2f",grey:"808080",honeydew:"f0fff0",hotpink:"ff69b4",indianred:"cd5c5c",indigo:"4b0082",ivory:"fffff0",khaki:"f0e68c",lavender:"e6e6fa",lavenderblush:"fff0f5",lawngreen:"7cfc00",lemonchiffon:"fffacd",lightblue:"add8e6",lightcoral:"f08080",lightcyan:"e0ffff",lightgoldenrodyellow:"fafad2",lightgray:"d3d3d3",lightgreen:"90ee90",lightgrey:"d3d3d3",lightpink:"ffb6c1",lightsalmon:"ffa07a",lightseagreen:"20b2aa",lightskyblue:"87cefa",lightslategray:"789",lightslategrey:"789",lightsteelblue:"b0c4de",lightyellow:"ffffe0",lime:"0f0",limegreen:"32cd32",linen:"faf0e6",magenta:"f0f",maroon:"800000",mediumaquamarine:"66cdaa",mediumblue:"0000cd",mediumorchid:"ba55d3",mediumpurple:"9370db",mediumseagreen:"3cb371",mediumslateblue:"7b68ee",mediumspringgreen:"00fa9a",mediumturquoise:"48d1cc",mediumvioletred:"c71585",midnightblue:"191970",mintcream:"f5fffa",mistyrose:"ffe4e1",moccasin:"ffe4b5",navajowhite:"ffdead",navy:"000080",oldlace:"fdf5e6",olive:"808000",olivedrab:"6b8e23",orange:"ffa500",orangered:"ff4500",orchid:"da70d6",palegoldenrod:"eee8aa",palegreen:"98fb98",paleturquoise:"afeeee",palevioletred:"db7093",papayawhip:"ffefd5",peachpuff:"ffdab9",peru:"cd853f",pink:"ffc0cb",plum:"dda0dd",powderblue:"b0e0e6",purple:"800080",rebeccapurple:"663399",red:"f00",rosybrown:"bc8f8f",royalblue:"4169e1",saddlebrown:"8b4513",salmon:"fa8072",sandybrown:"f4a460",seagreen:"2e8b57",seashell:"fff5ee",sienna:"a0522d",silver:"c0c0c0",skyblue:"87ceeb",slateblue:"6a5acd",slategray:"708090",slategrey:"708090",snow:"fffafa",springgreen:"00ff7f",steelblue:"4682b4",tan:"d2b48c",teal:"008080",thistle:"d8bfd8",tomato:"ff6347",turquoise:"40e0d0",violet:"ee82ee",wheat:"f5deb3",white:"fff",whitesmoke:"f5f5f5",yellow:"ff0",yellowgreen:"9acd32"},He=m.hexNames=Ne(K);function Ne(t){var e={};for(var a in t)t.hasOwnProperty(a)&&(e[t[a]]=a);return e}function Rt(t){return t=parseFloat(t),(isNaN(t)||t<0||t>1)&&(t=1),t}function _(t,e){De(t)&&(t="100%");var a=Fe(t);return t=Math.min(e,Math.max(0,parseFloat(t))),a&&(t=parseInt(t*e,10)/100),Math.abs(t-e)<1e-6?1:t%e/parseFloat(e)}function U(t){return Math.min(1,Math.max(0,t))}function E(t){return parseInt(t,16)}function De(t){return typeof t=="string"&&t.indexOf(".")!=-1&&parseFloat(t)===1}function Fe(t){return typeof t=="string"&&t.indexOf("%")!=-1}function z(t){return t.length==1?"0"+t:""+t}function N(t){return t<=1&&(t=t*100+"%"),t}function Et(t){return Math.round(parseFloat(t)*255).toString(16)}function vt(t){return E(t)/255}var 
T=function(){var t="[-\\+]?\\d+%?",e="[-\\+]?\\d*\\.\\d+%?",a="(?:"+e+")|(?:"+t+")",r="[\\s|\\(]+("+a+")[,|\\s]+("+a+")[,|\\s]+("+a+")\\s*\\)?",n="[\\s|\\(]+("+a+")[,|\\s]+("+a+")[,|\\s]+("+a+")[,|\\s]+("+a+")\\s*\\)?";return{CSS_UNIT:new RegExp(a),rgb:new RegExp("rgb"+r),rgba:new RegExp("rgba"+n),hsl:new RegExp("hsl"+r),hsla:new RegExp("hsla"+n),hsv:new RegExp("hsv"+r),hsva:new RegExp("hsva"+n),hex3:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex6:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/,hex4:/^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,hex8:/^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/}}();function I(t){return!!T.CSS_UNIT.exec(t)}function qe(t){t=t.replace(Me,"").replace(ye,"").toLowerCase();var e=!1;if(K[t])t=K[t],e=!0;else if(t=="transparent")return{r:0,g:0,b:0,a:0,format:"name"};var a;return(a=T.rgb.exec(t))?{r:a[1],g:a[2],b:a[3]}:(a=T.rgba.exec(t))?{r:a[1],g:a[2],b:a[3],a:a[4]}:(a=T.hsl.exec(t))?{h:a[1],s:a[2],l:a[3]}:(a=T.hsla.exec(t))?{h:a[1],s:a[2],l:a[3],a:a[4]}:(a=T.hsv.exec(t))?{h:a[1],s:a[2],v:a[3]}:(a=T.hsva.exec(t))?{h:a[1],s:a[2],v:a[3],a:a[4]}:(a=T.hex8.exec(t))?{r:E(a[1]),g:E(a[2]),b:E(a[3]),a:vt(a[4]),format:e?"name":"hex8"}:(a=T.hex6.exec(t))?{r:E(a[1]),g:E(a[2]),b:E(a[3]),format:e?"name":"hex"}:(a=T.hex4.exec(t))?{r:E(a[1]+""+a[1]),g:E(a[2]+""+a[2]),b:E(a[3]+""+a[3]),a:vt(a[4]+""+a[4]),format:e?"name":"hex8"}:(a=T.hex3.exec(t))?{r:E(a[1]+""+a[1]),g:E(a[2]+""+a[2]),b:E(a[3]+""+a[3]),format:e?"name":"hex"}:!1}function je(t){var e,a;return t=t||{level:"AA",size:"small"},e=(t.level||"AA").toUpperCase(),a=(t.size||"small").toLowerCase(),e!=="AA"&&e!=="AAA"&&(e="AA"),a!=="small"&&a!=="large"&&(a="small"),{level:e,size:a}}var Mt=(t,e)=>{let{$canvas:a}=e,r=[],n,i,o,s=t.minSize,l=t.maxSize,c=t.minSpeed,u=t.maxSpeed,h=t.maxScale,d=t.minScale;for(let f=0;f0){n=F(t.svgPathData);let p=ve(n);p&&(i=[p.w,p.h],o=[x[0]/p.w,x[1]/p.h])}let A;t.particlesColors&&t.particlesColors.length>0?A=F(t.particlesColors):A=Xt();let b=m(A).toRgb();r.push({center:[$(0,a.width),$(0,a.height)],speed:[$(c,u),$(c,u)],size:x,color:A,rgbaColor:[b.r,b.g,b.b,b.a],svgPathData:n,svgSize:i,scaleSize:o,angleRad:0,rotateCounterClockwise:Gt(),scale:(d+h)/2,scaleDirection:F([-1,1]),opacity:$(0,1),opacityDirection:F([-1,1])})}return r},Be=(t,e,a)=>{let{$canvas:r}=a,n=q({},t),[i,o]=t.center,s=[...t.speed];if((i>r.width||i<0)&&(s[0]=-s[0]),(o>r.height||o<0)&&(s[1]=-s[1]),n.speed=s,n.center=[i+n.speed[0],o+n.speed[1]],e.rotate&&(t.rotateCounterClockwise?n.angleRad+=Math.PI/180:n.angleRad-=Math.PI/180),e.scaleInOut){let l=e.scaleStep,c=e.maxScale,u=e.minScale;n.scaleDirection>0?n.scale+=l:n.scale-=l,n.scale>c&&(n.scale=c,n.scaleDirection=-1),n.scale0?n.opacity+=l:n.opacity-=l,n.opacity>1&&(n.scale=1,n.opacityDirection=-1),n.opacity<0&&(n.opacity=0,n.opacityDirection=1)}return n},Ue=(t,e,a)=>{let{ctx:r}=a;if(!e.svgPathData){r.save();let{size:h}=t,d=h[0];e.scaleInOut&&(d*=t.scale);let f=e.fadeInOut?Y(t.rgbaColor[0],t.rgbaColor[1],t.rgbaColor[2],t.opacity):t.color;Vt({cx:t.center[0],cy:t.center[1],r:d,fillStyle:f},r),r.restore();return}let[n,i]=t.svgSize?t.svgSize:t.size,o=new Path2D(t.svgPathData);r.save();let 
s=n/2,l=i/2,[c,u]=[t.center[0]-s,t.center[1]-l];r.translate(c,u),t.scaleSize&&(r.translate(s,l),r.scale(...t.scaleSize),r.translate(-s,-l)),e.rotate&&(r.translate(s,l),r.rotate(t.angleRad),r.translate(-s,-l)),e.scaleInOut&&(r.translate(s,l),r.scale(t.scale,t.scale),r.translate(-s,-l)),e.fadeInOut?r.fillStyle=Y(t.rgbaColor[0],t.rgbaColor[1],t.rgbaColor[2],t.opacity):r.fillStyle=t.color,r.fill(o),r.restore()},Ve={particlesNumber:70,lgParticlesNumber:60,mdParticlesNumber:50,smParticlesNumber:30,resizeDebounceTime:1e3,particlesColors:[],minSpeed:-2,maxSpeed:2,minSize:5,maxSize:10,connected:!0,connectionColor:"rgb(70,126,150)",connectionSize:.09,lgConnectionSize:.15,mdConnectionSize:.2,smConnectionSize:.2,maxScale:2,minScale:.5,scaleStep:.01,opacityStep:.001},We=(t,e)=>e?q(q({},t),e):q({},t);function Qe(t){var e=typeof t;return t!=null&&(e=="object"||e=="function")}var tt=Qe,Ze=typeof global=="object"&&global&&global.Object===Object&&global,Je=Ze,Ye=typeof self=="object"&&self&&self.Object===Object&&self,Ge=Je||Ye||Function("return this")(),Tt=Ge,Xe=function(){return Tt.Date.now()},Z=Xe,Ke=/\s/;function ta(t){for(var e=t.length;e--&&Ke.test(t.charAt(e)););return e}var ea=ta,aa=/^\s+/;function ra(t){return t&&t.slice(0,ea(t)+1).replace(aa,"")}var na=ra,ia=Tt.Symbol,B=ia,zt=Object.prototype,oa=zt.hasOwnProperty,sa=zt.toString,H=B?B.toStringTag:void 0;function la(t){var e=oa.call(t,H),a=t[H];try{t[H]=void 0;var r=!0}catch(i){}var n=sa.call(t);return r&&(e?t[H]=a:delete t[H]),n}var ca=la,ua=Object.prototype,ha=ua.toString;function fa(t){return ha.call(t)}var ma=fa,da="[object Null]",ba="[object Undefined]",yt=B?B.toStringTag:void 0;function ga(t){return t==null?t===void 0?ba:da:yt&&yt in Object(t)?ca(t):ma(t)}var pa=ga;function va(t){return t!=null&&typeof t=="object"}var Ma=va,ya="[object Symbol]";function ka(t){return typeof t=="symbol"||Ma(t)&&pa(t)==ya}var xa=ka,kt=0/0,Sa=/^[-+]0x[0-9a-f]+$/i,wa=/^0b[01]+$/i,_a=/^0o[0-7]+$/i,Aa=parseInt;function Ca(t){if(typeof t=="number")return t;if(xa(t))return kt;if(tt(t)){var e=typeof t.valueOf=="function"?t.valueOf():t;t=tt(e)?e+"":e}if(typeof t!="string")return t===0?t:+t;t=na(t);var a=wa.test(t);return a||_a.test(t)?Aa(t.slice(2),a?2:8):Sa.test(t)?kt:+t}var xt=Ca,La="Expected a function",Ra=Math.max,Ea=Math.min;function Ta(t,e,a){var r,n,i,o,s,l,c=0,u=!1,h=!1,d=!0;if(typeof t!="function")throw new TypeError(La);e=xt(e)||0,tt(a)&&(u=!!a.leading,h="maxWait"in a,i=h?Ra(xt(a.maxWait)||0,e):i,d="trailing"in a?!!a.trailing:d);function f(g){var S=r,L=n;return r=n=void 0,c=g,o=t.apply(L,S),o}function v(g){return c=g,s=setTimeout(b,e),u?f(g):o}function x(g){var S=g-l,L=g-c,w=e-S;return h?Ea(w,i-L):w}function A(g){var S=g-l,L=g-c;return l===void 0||S>=e||S<0||h&&L>=i}function b(){var g=Z();if(A(g))return p(g);s=setTimeout(b,x(g))}function p(g){return s=void 0,d&&r?f(g):(r=n=void 0,o)}function k(){s!==void 0&&clearTimeout(s),c=0,r=l=n=s=void 0}function y(){return s===void 0?o:p(Z())}function M(){var g=Z(),S=A(g);if(r=arguments,n=this,l=g,S){if(s===void 0)return v(l);if(h)return clearTimeout(s),s=setTimeout(b,e),f(l)}return s===void 0&&(s=setTimeout(b,e)),o}return M.cancel=k,M.flush=y,M}var za=Ta,St=(t,e)=>{let{$canvas:a,ctx:r}=e,n={x:0,y:0,w:a.width,h:a.height};t.canvasColor?n.fillStyle=t.canvasColor:n.clear=!0,Ut(n,r),t.connected&&re(t,e);for(let i=0;i{let e=We(Ve,t);if(!e.$placeholder)return;let a=e.$placeholder.getBoundingClientRect(),r={width:a.width,height:a.height},{ctx:n,$canvas:i}=jt(r);if(!n)return;e.$placeholder.append(i);let 
o=m(e.connectionColor).toRgb(),s={connectionRgbColor:[o.r,o.g,o.b,o.a],particles:[],ctx:n,$canvas:i,vpParticlesNumber:ut(e)};s.particles=Mt(e,s);let l=za(()=>{let c=ut(e);s.vpParticlesNumber!==c&&(s.vpParticlesNumber=c,s.particles=Mt(e,s))},e.resizeDebounceTime);return ee({callback:()=>{n&&St(e,s)},restartOnResize:!0,resizeCallback:()=>{if(!e.$placeholder)return;let c=e.$placeholder.getBoundingClientRect();i.width=c.width,i.height=c.height,St(e,s),l()}}).start(),i};window.particles=Ot;var Pt=Ot;var Oa=()=>{let t=document.getElementById("hp-animation");t&&Pt({$placeholder:t,particlesNumber:100,minSize:10,maxSize:30,particlesColors:["#366d8c","#368c8c","#fffc00","#92fae7","#5daed2","#366d8c"],connectionColor:"#7fb2b7",svgPathData:["m14.5 21.75-8.52289 4.48075 1.62773-9.49038-6.89516-6.72112 9.52888-1.38462L14.5 0l4.26144 8.63463 9.52888 1.38462-6.89516 6.72112 1.62773 9.49038z","M14.5 21.75 4.24695 24.75305 7.25 14.5 4.24695 4.24695 14.5 7.25l10.25305-3.00305L21.75 14.5l3.00305 10.25305z","m14.5 21.75-5.54891 6.14625.42239-8.26973-8.26973.42239L7.25 14.5 1.10375 8.95109l8.26973.42239-.42239-8.26973L14.5 7.25l5.54891-6.14625-.42239 8.26973 8.26973-.42239L21.75 14.5l6.14625 5.54891-8.26973-.42239.42239 8.26973z"],rotate:!0,scaleInOut:!0,maxScale:1.2,minScale:.7,scaleStep:.005})},Pa=()=>{if(document.getElementById("special-page")){Oa();return}ot(),rt(),it(),st()};document.addEventListener("DOMContentLoaded",()=>{Pa()});})(); -//# sourceMappingURL=index.1710770887921.js.map +//# sourceMappingURL=index.1710779087934.js.map diff --git a/docs/js/index.1710770887921.js.map b/docs/js/index.1710779087934.js.map similarity index 100% rename from docs/js/index.1710770887921.js.map rename to docs/js/index.1710779087934.js.map diff --git a/docs/pages/interfaces.html b/docs/pages/interfaces.html index 606c6e9..ec4b48a 100644 --- a/docs/pages/interfaces.html +++ b/docs/pages/interfaces.html @@ -16,8 +16,8 @@ - - + + @@ -147,8 +147,8 @@ private static initZeroArray; private shuffle; private gradientDescent; - - train(): (number | number[])[]; + + fit(): (number | number[])[]; predict(features: number[]): number; // Statistics @@ -183,6 +183,6 @@ --> - + \ No newline at end of file diff --git a/docs/pages/linear-regression.html b/docs/pages/linear-regression.html index 01e39b1..5f52d73 100644 --- a/docs/pages/linear-regression.html +++ b/docs/pages/linear-regression.html @@ -16,8 +16,8 @@ - - + + @@ -136,7 +136,7 @@ batchSize: 2, // optional }); -const [weights, bias] = model.train(); // [[0.4855781005489537], 0.8483783596443771] +const [weights, bias] = model.fit(); // [[0.4855781005489537], 0.8483783596443771] const prediction = model.predict([17]); // 89 // The coefficient of determination R-Squared @@ -175,6 +175,6 @@ --> - + \ No newline at end of file diff --git a/src/core/linear-regression.ts b/src/core/linear-regression.ts index 54878b8..607ef15 100644 --- a/src/core/linear-regression.ts +++ b/src/core/linear-regression.ts @@ -130,7 +130,7 @@ export class LinearRegression { return [newWeights, newBias]; } - train() { + fit() { for(let i = 0; i < this.options.epochs; i++) { if (this.options.shuffle) { diff --git a/src/docs/data/pages/10-main/1-linear-regression.md b/src/docs/data/pages/10-main/1-linear-regression.md index bcfeab4..86e217c 100644 --- a/src/docs/data/pages/10-main/1-linear-regression.md +++ b/src/docs/data/pages/10-main/1-linear-regression.md @@ -15,7 +15,7 @@ const regression = new LinearRegression({ batchSize: 2, // optional }); -const [weights, bias] = model.train(); // 
[[0.4855781005489537], 0.8483783596443771] +const [weights, bias] = model.fit(); // [[0.4855781005489537], 0.8483783596443771] const prediction = model.predict([17]); // 89 // The coefficient of determination R-Squared diff --git a/src/docs/data/pages/10-main/2-interfaces.md b/src/docs/data/pages/10-main/2-interfaces.md index 0526423..7aa5858 100644 --- a/src/docs/data/pages/10-main/2-interfaces.md +++ b/src/docs/data/pages/10-main/2-interfaces.md @@ -26,8 +26,8 @@ export class LinearRegression { private static initZeroArray; private shuffle; private gradientDescent; - - train(): (number | number[])[]; + + fit(): (number | number[])[]; predict(features: number[]): number; // Statistics diff --git a/test/linear-regression.test.ts b/test/linear-regression.test.ts index 56b0a38..0cd5baa 100644 --- a/test/linear-regression.test.ts +++ b/test/linear-regression.test.ts @@ -15,7 +15,7 @@ describe('Linear Regression', () => { labels, }); - const [weights, bias] = model.train(); + const [weights, bias] = model.fit(); expect([weights, bias]).toStrictEqual([[0.7492312657204566], 0.17408378352957618]); expect(model.rSquared()).toStrictEqual(0.16455829885857165); @@ -33,7 +33,7 @@ describe('Linear Regression', () => { batchSize: 1, }); - const [weights, bias] = model.train(); + const [weights, bias] = model.fit(); expect([weights, bias]).toStrictEqual([[0.6381096434369923], 13.13201182243643]); expect(model.rSquared()).toStrictEqual(0.2393979741060026); @@ -51,7 +51,7 @@ describe('Linear Regression', () => { batchSize: 2, }); - const [weights, bias] = model.train(); + const [weights, bias] = model.fit(); expect([weights, bias]).toStrictEqual([[0.7558520339174016], 7.271072855627577]); expect(model.rSquared()).toStrictEqual(0.18940279995876763); @@ -72,7 +72,7 @@ describe('Linear Regression', () => { batchSize: 2, }); - const [weights, bias] = model.train(); + const [weights, bias] = model.fit(); expect([weights, bias]).toStrictEqual([[0.4855781005489537], 0.8483783596443771]); expect(model.predict([17])).toStrictEqual(9.10320606897659); @@ -88,7 +88,7 @@ describe('Linear Regression', () => { points, }); - const [weights, bias] = model.train(); + const [weights, bias] = model.fit(); expect([weights, bias]).toStrictEqual([0.7492312657204566, 0.17408378352957618]); });*/ diff --git a/test/node/linear-regression-mini-batch.js b/test/node/linear-regression-mini-batch.js index cb3701f..203fea2 100644 --- a/test/node/linear-regression-mini-batch.js +++ b/test/node/linear-regression-mini-batch.js @@ -40,7 +40,7 @@ const init = () => { } }); - const [weights, bias] = model.train(); + const [weights, bias] = model.fit(); const xData = features.map(arr => arr[0]); diff --git a/test/node/linear-regression-sgd.js b/test/node/linear-regression-sgd.js index ceaa594..8d2123e 100644 --- a/test/node/linear-regression-sgd.js +++ b/test/node/linear-regression-sgd.js @@ -40,7 +40,7 @@ const init = () => { } }); - const [weights, bias] = model.train(); + const [weights, bias] = model.fit(); const xData = features.map(arr => arr[0]); diff --git a/test/node/linear-regression.js b/test/node/linear-regression.js index e2e026d..9233547 100644 --- a/test/node/linear-regression.js +++ b/test/node/linear-regression.js @@ -39,7 +39,7 @@ const init = () => { } }); - const [weights, bias] = model.train(); + const [weights, bias] = model.fit(); const xData = features.map(arr => arr[0]);
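
The hunks above rename the public training method from `train()` to `fit()` across the typings, docs, source, and tests; nothing about the optimization itself changes, only the method name and the cache-busted doc asset filenames. For readers skimming the patch, the sketch below shows how the post-rename API is called end to end. It mirrors the usage shown in the linear-regression docs and the Jest tests, but the sample data and the `epochs`/`learningRate` values are illustrative assumptions, not values taken from this diff.

```ts
// Hedged usage sketch of the renamed API (train() -> fit()).
// Only `features`, `labels`, `epochs`, `shuffle`, and `batchSize` are visible
// in this patch; `learningRate` and all concrete values are assumptions.
import { LinearRegression } from 'mz-ml';

const features = [[1], [2], [3], [4], [5]]; // one feature per sample (assumed data)
const labels = [2, 4, 6, 8, 10];            // target values (assumed data)

const model = new LinearRegression({
    features,
    labels,
    epochs: 1000,        // assumed value
    learningRate: 0.01,  // assumed option/value
    shuffle: true,
    batchSize: 2,        // optional, as in the docs example
});

// fit() replaces the old train(); it still returns [weights, bias].
const [weights, bias] = model.fit();

// Prediction plus the diagnostics exposed by the class.
const prediction = model.predict([6]);
const r2 = model.rSquared();          // 1 would be a perfect fit
const mse = model.meanSquaredError(); // 0 would be a perfect fit
const r = model.pearson();            // per-feature correlation in [-1, 1]

console.log({ weights, bias, prediction, r2, mse, r });
```

Since `fit()` keeps the `(number | number[])[]` return shape declared in `dist/index.d.ts`, callers migrating from `train()` only need the one-word rename shown throughout the docs and test hunks above.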