{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Duffing-stuffing parameter estimation\n", "\n", "Goals:\n", "\n", " * Estimate parameters for a Duffing-like model such that it describes the behavior of the system with low error in different experimental schemes (varying resonnance frequencies, degradation, etc.).\n", " * Simulate the system and perform various analyses (sensitivity, stability, etc.)\n", " \n", "\n", "## Table of contents\n", "\n", " 1. [Empirical data](#empiric-data)\n", " 2. [Model](#model)\n", " 3. [Loss function](#loss-function)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "%matplotlib notebook\n", "\n", "import os\n", "import numpy as np\n", "import random\n", "import itertools\n", "from tqdm import tqdm_notebook as tqdm\n", "from toolz import curry\n", "from scipy import signal\n", "from scipy.optimize import minimize\n", "from scipy.io import loadmat\n", "\n", "# PyDSTool requires scipy 0.X\n", "# However, solve_ivp was introduced in scipy 1.X.\n", "from scipy.integrate import odeint, solve_ivp\n", "#from pydstool_integrator import simulate as ds_simulate\n", "\n", "import matplotlib.pyplot as plt\n", "#from matplotlib import animation\n", "#plt.rcParams[\"animation.html\"] = \"jshtml\"" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "\n", "## Empirical data\n", "\n", "Data is measured through frequency scans at lab.\n", "\n", "Read data from `.mat`-files as a dict of numpy arrays. We focus primarily on the XY-trace data, containing a stable-state period of 100 observations per frequency, for 5 experiments total with variying resonnance frequencies." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def read_xy(matfile, experiment_no):\n", " \"\"\"Experiment number in [0, 5]\"\"\"\n", " xy = loadmat(matfile)['XYPost'][0, experiment_no]\n", " print(\"Variables (rows x observations): \", xy.dtype.names)\n", " xy_data = dict([(k, xy[i]) for i, k in enumerate(xy.dtype.names)])\n", " t_min, t_max = xy_data['t'][0,0], xy_data['t'][-1,0]\n", " f_min, f_max = xy_data['f'][0,0], xy_data['f'][-1,0]\n", " print(\"Resonnance frequencies: (%d, %d)\" % (xy_data['XResfFreq'][0,0], xy_data['YResfFreq'][0,0]))\n", " print(\"Resonnance amplitudes: (%.2f, %.2f)\" % (xy_data['XResAmp'][0,0], xy_data['YResAmp'][0,0]))\n", " print(\"T = %.2f\" % (t_max - t_min,))\n", " print(\"t in [%.2f, %.2f]\" % (t_min, t_max))\n", " print(\"f in [%.1f, %.1f]\" % (f_min, f_max))\n", " print(\"x shape: %d x %d\" % xy_data['x'].shape)\n", " print(\"y shape: %d x %d\" % xy_data['y'].shape)\n", " return xy_data\n", "\n", "\n", "def read_amp(matfile):\n", " ds_name = os.path.splitext(os.path.basename(matfile))[0]\n", " print(\"Reading ds '%s'\" % ds_name)\n", " amp = loadmat(matfile)[ds_name]\n", " _, n_vars = amp.shape\n", " amp_data = dict([(amp[0,i][1][0][0][0], amp[0,i][0][:,0]) for i in range(n_vars)])\n", " print(\"Variables: %s\" % ','.join(amp_data.keys()))\n", " return amp_data" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Read first experiment\n", "print(\"Reading first experiment\")\n", "xy_data = read_xy('data/XYPost.mat', 0)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "#amp_pre = read_amp('data/APre.mat')\n", "#amp_post = read_amp('data/APost.mat')\n", "#amp_postb = read_amp('data/APostB.mat')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Plotting\n", "\n", "Three types of plots:\n", "\n", " * Frequency scan with amplitude mean/std.\n", " * Trajectory plot in (x, y)-plane.\n", " * Trajectory over time for x and y." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def plot_std_freqscan(xy_data):\n", " linewidth = 0.6\n", " fig, (ax11, ax12) = plt.subplots(\n", " nrows=1,\n", " ncols=2,\n", " figsize=(8, 4)\n", " )\n", " rows = list(filter(\n", " lambda r: xy_data['f'][r,0] != 0,\n", " range(xy_data['f'].shape[0])\n", " ))\n", " ax11.errorbar(\n", " xy_data['f'][rows,0],\n", " np.mean(xy_data['x'][rows, :], axis=1),\n", " np.std(xy_data['x'][rows, :], axis=1),\n", " linewidth=linewidth\n", " )\n", " ax11.axvline(x=xy_data['XResfFreq'][0,0], linestyle='--', color='red', linewidth=0.8)\n", " ax11.axvline(x=xy_data['YResfFreq'][0,0], linestyle='--', color='red', linewidth=0.8)\n", " ax11.set_xlabel(r'$f$')\n", " ax11.set_ylabel(r'$x$')\n", " ax12.errorbar(\n", " xy_data['f'][rows,0],\n", " np.mean(xy_data['y'][rows, :], axis=1),\n", " np.std(xy_data['y'][rows, :], axis=1),\n", " linewidth=linewidth\n", " )\n", " ax12.axvline(x=xy_data['XResfFreq'][0,0], linestyle='--', color='red', linewidth=0.8)\n", " ax12.axvline(x=xy_data['YResfFreq'][0,0], linestyle='--', color='red', linewidth=0.8)\n", " ax12.set_xlabel(r'$f$')\n", " ax12.set_ylabel(r'$y$')\n", " plt.suptitle(\"Amplitude mean and standard deviation per frequency\\nRed lines are resonnance frequencies\")\n", " #plt.tight_layout()\n", " \n", " \n", "def plot_xy(rows, xy_data):\n", " linewidth = 0.6\n", " fig, ((ax11, ax12), (ax21, ax22)) = plt.subplots(\n", " nrows=2,\n", " ncols=2,\n", " figsize=(8, 6)\n", " )\n", " for row in rows:\n", " ax11.plot(xy_data['x'][row, :], linewidth=linewidth)\n", " ax12.plot(xy_data['y'][row, :], linewidth=linewidth)\n", " ax21.plot(xy_data['x'][row, :], xy_data['y'][row, :], linewidth=linewidth)\n", " ax22.plot(xy_data['f'][row, :])\n", " \n", " ax11.set_ylabel(r'$x$')\n", " ax12.set_ylabel(r'$y$')\n", " ax21.set_xlabel(r'$x$')\n", " ax21.set_ylabel(r'$y$')\n", " ax22.set_ylabel(r'$f$')\n", " plt.suptitle(\"XY-data plots for given frequencies\")\n", " #plt.tight_layout()\n", " \n", " \n", "def plot_xyt(rows, xy_data, normalizer=lambda x: x, sim_xy_data=None):\n", " N = len(rows)\n", " linewidth = 0.6\n", " fig, axes = plt.subplots(\n", " nrows=N,\n", " ncols=2,\n", " #sharey=True,\n", " #sharex=True,\n", " figsize=(8, 3*N)\n", " )\n", " for i in range(N):\n", " axes[i,0].plot(normalizer(xy_data['x'][rows[i], :]), linewidth=linewidth)\n", " axes[i,1].plot(normalizer(xy_data['y'][rows[i], :]), linewidth=linewidth)\n", " if sim_xy_data is not None:\n", " axes[i,0].plot(normalizer(sim_xy_data['x'][rows[i], :]), linewidth=linewidth)\n", " axes[i,1].plot(normalizer(sim_xy_data['y'][rows[i], :]), linewidth=linewidth)\n", " axes[i,0].set_ylabel('$x$ ($f$=%d)' % xy_data['f'][rows[i], 0])\n", " axes[i,1].set_ylabel('$y$ ($f$=%d)' % xy_data['f'][rows[i], 0])\n", " plt.suptitle(\"Stable-state XY-data plots for given frequencies\")\n", " #plt.tight_layout()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "# Plot freqency scans (mean +- variation)\n", "rows = [50, 65, 70, 75, 80, 85, 90, 100]\n", "for exp_no in range(5):\n", " xy_data = read_xy('data/XYPost.mat', exp_no)\n", " plot_std_freqscan(xy_data)\n", " plot_xy(rows, xy_data)\n", " plot_xyt(rows, xy_data)\n", " break" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "\n", "## Model\n", "\n", "Model derived from Duffing equations:\n", "\n", "\\begin{align}\n", "m_1 \\ddot{y}_1 &= F_1 - \\dot{y}_1(c_1 + c_3) + \\dot{y}_2c_3 - y_1(k_1 + k_3) + y_2k_3 - \\alpha_1y_1^3 + \\alpha_3(y_2 - y_1)^3 \\\\\n", "m_2 \\ddot{y}_2 &= F_2 - \\dot{y}_2(c_2 + c_3) + \\dot{y}_1c_3 - x_2(k_2 + k_3) + y_1k_3 - \\alpha_2y_2^3 + \\alpha_3(y_2 - y_1)^3 \\\\\n", "\\end{align}\n", "\n", "where $F = Ce^{i\\omega t}$. Transform to first-order form by variable substitutions $x_3 = \\dot{y}_1, x_1 = y_1$ and $x_4 = \\dot{y}_2, x_2 = y_2$:\n", "\n", "\\begin{align}\n", "\\dot{x}_1 &= x_3 \\\\\n", "\\dot{x}_2 &= x_4 \\\\\n", "m_1\\dot{x}_3 &= F_1 - x_3(c_1 + c_3) + x_4c_3 - x_1(k_1 + k_3) + x_2k_3 - \\alpha_1x_1^3 + \\alpha_3(x_2 - x_1)^3 \\\\\n", "m_2\\dot{x}_4 &= F_2 - x_4(c_2 + c_3) + x_3c_3 - x_2(k_2 + k_3) + x_1k_3 - \\alpha_2x_2^3 + \\alpha_3(x_2 - x_1)^3 \\\\\n", "\\end{align}\n", "\n", "\n", "For some reason, this model doesn't work out when working backwards from resonnance frequencies. I may be missing something obvious, otherwise the fact that mass goes into the estimations may mess things up.\n", "\n", "\n", "### Transformed model\n", "\n", "Eliminate mass, + easier to reason about physical constants:\n", "\n", "\\begin{align}\n", "\\dot{x}_1 &= x_3 \\\\\n", "\\dot{x}_2 &= x_4 \\\\\n", "\\dot{x}_3 &= \\frac{1}{m_1}F_1 - x_3(c_1 + c_3) + x_4c_3 - x_1(k_1 + k_3) + x_2k_3 - \\alpha_1x_1^3 + \\alpha_3(x_2 - x_1)^3 \\\\\n", "\\dot{x}_4 &= \\frac{1}{m_2}F_2 - x_4(c_2 + c_3) + x_3c_3 - x_2(k_2 + k_3) + x_1k_3 - \\alpha_2x_2^3 + \\alpha_3(x_2 - x_1)^3 \\\\\n", "\\end{align}\n", "\n", "With the Jacobian $\\mathbf{J} = \\frac{\\partial \\mathbf{f}}{\\partial \\mathbf{X}}$\n", "\n", "\\begin{bmatrix}\n", "0 & 0 & 1 & 0 \\\\\n", "0 & 0 & 0 & 1 \\\\\n", "-k_1 - k_3 - 3\\alpha_1x_1^2 - 3\\alpha_3(x_2 - x_1)^2 & k_3 + 3\\alpha_3(x_2 - x_1)^2 & -c_1 - c_3 & c_3 \\\\\n", "k_3 - 3\\alpha_3(x_2 - x_1)^2 & -k_2 - k_3 - 3\\alpha_2x_2^2 + 3\\alpha_3(x_2 - x_1)^2 & c_3 & -c_2 - c_3\n", "\\end{bmatrix}\n", "\n", "Use the harmonic oscillator identities\n", "\n", " * Undamped angular frequency:\n", "\n", "\\begin{align}\n", "\\omega_0 &= \\sqrt{\\frac{k}{m}}\n", "\\end{align}\n", "\n", " * Damping ratio:\n", "\n", "\\begin{align}\n", "\\zeta &= \\frac{c}{2\\sqrt{mk}}\n", "\\end{align}\n", "\n", " * Resonant freqency:\n", "\n", "\\begin{align}\n", "\\omega_r &= \\omega_0\\sqrt{1-2\\zeta^2}, \\zeta < \\frac{1}{\\sqrt{2}}\n", "\\end{align}\n", "\n", "and express the constants subject to estimation as\n", "\n", "\\begin{align}\n", "c_1 &= 2 \\zeta_1 \\omega_{0,1} \\\\\n", "c_2 &= 2 \\zeta_2 \\omega_{0,2} \\\\\n", "c_3 &= g_c(c_1, c_2) \\\\\n", "k_1 &= \\omega_{0,1}^2 \\\\\n", "k_2 &= \\omega_{0,2}^2 \\\\\n", "k_3 &= g_k(k_1, k_2) \\\\\n", "\\alpha_1 &= f_1(\\mathbf{X} ; \\theta) \\\\\n", "\\alpha_2 &= f_2(\\mathbf{X} ; \\theta) \\\\\n", "\\alpha_3 &= g_{\\alpha}(\\alpha_1, \\alpha_2)\n", "\\end{align}\n", "\n", "\n", "With the model expressed this way, things make sense and we get resonnance where it should be.\n", "\n", "\n", "### Notes on solvers\n", "\n", " * We use SciPy's `solve_ivp` to simulate the system. Different methods (RK45, LSODA, Radeau) has been tested with no noticable differences.\n", " * The standard `odeint` from SciPy is super shit. It easily diverges and is unstable. They claim to use the standard LSODA solver (same as `solve_ivp` with method='LSODA') but the results are entirely different.\n", " * Once we hit the right parameters, the simulation is considerable slower because these are adaptive solvers.\n", " * There is an ODE implementation from the [PyDSTool package](https://github.com/robclewley/pydstool) which compiles to C and is much much faster.\n", " \n", "#### PyDSTool solver\n", "\n", "The model is implemented with this solver. It's slightly faster, but notoriously more complicated to use. Besides, it requires SciPy version `<1.0` which is not compatible with the rest of this code. Let's stick to scipy's modern solvers." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "@curry\n", "def model1(omega, p, t, X):\n", " x1, x2, xd1, xd2 = X\n", " C1, C2, m1, m2, c1, c2, c3, k1, k2, k3, a1, a2, a3 = p\n", " F1 = C1*np.cos(omega*t)\n", " F2 = C2*np.cos(omega*t)\n", " xdd1 = F1 - xd1*(c1 + c3) + xd2*c3 - x1*(k1 + k3) + x2*k3 - a1*x1**3 + a3*(x2 - x1)**3\n", " xdd2 = F2 - xd2*(c2 + c3) + xd1*c3 - x2*(k2 + k3) + x1*k3 - a2*x2**3 + a3*(x2 - x1)**3\n", " return xd1, xd2, m1*xdd1, m2*xdd2\n", "\n", "@curry\n", "def model2(omega, p, t, X):\n", " x1, x2, xd1, xd2 = X\n", " C1, C2, m1, m2, c1, c2, c3, k1, k2, k3, a1, a2, a3 = p\n", " xdd1 = C1*np.cos(omega*t)/m1 - xd1*(c1 + c3) + xd2*c3 - x1*(k1 + k3) + x2*k3 - a1*x1**3 + a3*(x2 - x1)**3\n", " xdd2 = C2*np.cos(omega*t)/m2 - xd2*(c2 + c3) + xd1*c3 - x2*(k2 + k3) + x1*k3 - a2*x2**3 + a3*(x2 - x1)**3\n", " return xd1, xd2, xdd1, xdd2\n", "\n", "@curry\n", "def jacobian2(omega, p, t, X):\n", " x1, x2, xd1, xd2 = X\n", " C1, C2, m1, m2, c1, c2, c3, k1, k2, k3, a1, a2, a3 = p\n", " return np.array([\n", " [ 0 , 0 , 1 , 0 ],\n", " [ 0 , 0 , 0 , 1 ],\n", " [-k1-k3-3*a1*x1**2-3*a3*(x2-x1)**2, k3+3*a3*(x2-x1)**2 , -c1-c3 , c3 ],\n", " [ k3-3*a3*(x2-x1)**2 , -k2-k3-3*a2*x2**2+3*a3*(x2-x1)**2 , c3 , -c2-c3]\n", " ])\n", "\n", "def odeint_integrate(model, jac, dt, T, X0, rtol, atol):\n", " t = np.linspace(0, T, int(T/dt))\n", " X = odeint(model, X0, t, Dfun=jac, tfirst=True, rtol=rtol, atol=atol)\n", " return X.T\n", "\n", "def solve_ivp_integrate(model, jac, dt, T, X0, rtol, atol):\n", " t = np.linspace(0, T, int(T/dt))\n", " sol = solve_ivp(model, [0, T], X0, t_eval=t, jac=jac, method='Radau', first_step=dt, rtol=rtol, atol=atol)\n", " return sol.y\n", "\n", "# Rely on the same functional signatures by mimicing the data structure of lab measurement.\n", "def simulate_xy_data(integrator, rows, xy_data, T, t_trans, t_scale, steps, x0, v0, p, progress=True):\n", " sim_xy_data = {\n", " 'x': np.zeros((101, 100)),\n", " 'y': np.zeros((101, 100)),\n", " 'f': np.zeros((101, 100)),\n", " 'XResfFreq': xy_data['XResfFreq'],\n", " 'YResfFreq': xy_data['YResfFreq']\n", " }\n", " _rows = tqdm(rows) if progress else rows\n", " for i in _rows:\n", " f = xy_data['f'][i,0]\n", " omega = 2*np.pi*f/t_scale\n", " X = integrator(\n", " model2(omega, p),\n", " jacobian2(omega, p),\n", " (T-t_trans)/steps,\n", " T,\n", " x0 + v0,\n", " 1e-3,\n", " [1e-4, 1e-4, 1e-2, 1e-2]\n", " )\n", " x1, x2, xd1, xd2 = X\n", " sim_xy_data['x'][i,:] = x1[-steps:]\n", " sim_xy_data['y'][i,:] = x2[-steps:]\n", " sim_xy_data['f'][i,:] = f\n", " return sim_xy_data\n", "\n", "def simulate_experiment(xy_data, rows, zeta1, zeta2, gc, gk, ga, f1, f2, verbose=True):\n", " # Amplitude at resonnance should be 1\n", " # Aim for better numerical stability by setting C and m to approx. the same numeric precision\n", " t_scale = 1\n", " C1 = 1.5e7/t_scale\n", " C2 = 1.5e7/t_scale\n", " m1 = 1\n", " m2 = 1\n", "\n", " # Fetch resonnance frequencies from data\n", " f_r1 = xy_data['XResfFreq']/t_scale\n", " f_r2 = xy_data['YResfFreq']/t_scale\n", " omega_r1 = 2*np.pi*f_r1\n", " omega_r2 = 2*np.pi*f_r2\n", " omega_01 = omega_r1/(np.sqrt(1-2*zeta1**2))\n", " omega_02 = omega_r2/(np.sqrt(1-2*zeta2**2))\n", "\n", " # Compute parameters from identities\n", " c1 = 2*zeta1*omega_01\n", " c2 = 2*zeta2*omega_02\n", " c3 = gc(c1, c2)\n", " k1 = omega_01**2\n", " k2 = omega_02**2\n", " k3 = gk(k1, k2)\n", " a1 = f1(k1, k2)\n", " a2 = f2(k1, k2)\n", " a3 = ga(k1, k2)\n", "\n", " p = (\n", " C1, C2,\n", " m1, m2,\n", " c1, c2, c3,\n", " k1, k2, k3,\n", " a1, a2, a3\n", " )\n", "\n", " if verbose:\n", " print(\"Parameters:\")\n", " print(\"Omega_r 1 = %.3f\" % omega_r1)\n", " print(\"Omega_r 2 = %.3f\" % omega_r2)\n", " print(\"Omega0 1 = %.3f\" % omega_01)\n", " print(\"Omega0 2 = %.3f\" % omega_02)\n", " print(\"c1 = %.3f\" % c1)\n", " print(\"c2 = %.3f\" % c2)\n", " print(\"c3 = %.3f\" % c3)\n", " print(\"k1 = %.3f\" % k1)\n", " print(\"k2 = %.3f\" % k2)\n", " print(\"k3 = %.3f\" % k3)\n", " print(\"a1 = %.3f\" % a1)\n", " print(\"a2 = %.3f\" % a2)\n", " print(\"a3 = %.3f\" % a3)\n", "\n", " # Start from any state, the system stabilize quickly\n", " x0, v0 = (-0.002, 0.01), (-0.004, 0.03)\n", " #x0, v0 = (0.0, 0.0), (0.0, 0.0)\n", " \n", " # Set transient period to 0.1 seconds and simulate 100 steps over 0.5 seconds\n", " t_trans = 0.1*t_scale\n", " T = t_trans + 0.5*t_scale\n", " steps = 100\n", "\n", " # Simulate mostly around resonnance\n", " return simulate_xy_data(solve_ivp_integrate, rows, xy_data, T, t_trans, t_scale, steps, x0, v0, p, progress=verbose)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Harmonic oscillator\n", "\n", "Set $c_3 = k_3 = \\alpha_1 = \\alpha_2 = \\alpha_3 = 0$ for simulating a standard driven harmonic oscillator with no coupling between x- and y-components. Assume damping $\\zeta_1 = \\zeta_2 = 0.1$ and use resonnance frequencies from lab data to estimate parameters." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "exp_no = 0\n", "print(\"Read data, experiment %d\" % exp_no)\n", "xy_data = read_xy('data/XYPost.mat', exp_no)\n", "\n", "def gc(c1, c2):\n", " return 0.0\n", "\n", "def gk(k1, k2):\n", " return 0.0\n", "\n", "def f1(k1, k2):\n", " return 0.0\n", "\n", "def f2(k1, k2):\n", " return 0.0\n", "\n", "def ga(k1, k2):\n", " return 0.0\n", "\n", "rows = [50, 65, 70, 75, 80, 85, 90, 100]\n", "zeta1 = 0.1\n", "zeta2 = 0.1\n", "xyhat_data = simulate_experiment(xy_data, rows, zeta1, zeta2, gc, gk, ga, f1, f2, verbose=True)\n", "\n", "plot_std_freqscan(xyhat_data)\n", "plot_xy(rows, xyhat_data)\n", "plot_xyt(rows, xy_data, sim_xy_data=xyhat_data)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### With duffing term\n", "\n", "The duffing term has to be pretty large to see any stiffening effect. Set $\\alpha_1 = 1500k_1$ and $\\alpha_2 = 1500k_2$ (still without coupling)." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "exp_no = 0\n", "print(\"Read data, experiment %d\" % exp_no)\n", "xy_data = read_xy('data/XYPost.mat', exp_no)\n", "\n", "def gc(c1, c2):\n", " return 0.0\n", "\n", "def gk(k1, k2):\n", " return 0.0\n", "\n", "def f1(k1, k2):\n", " return 1.5e3*k1\n", "\n", "def f2(k1, k2):\n", " return 1.5e3*k2\n", "\n", "def ga(k1, k2):\n", " return 0.0\n", "\n", "rows = [50, 65, 70, 75, 80, 85, 90, 100]\n", "zeta1 = 0.1\n", "zeta2 = 0.1\n", "xyhat_data = simulate_experiment(xy_data, rows, zeta1, zeta2, gc, gk, ga, f1, f2, verbose=True)\n", "\n", "plot_std_freqscan(xyhat_data)\n", "plot_xy(rows, xyhat_data)\n", "plot_xyt(rows, xy_data, sim_xy_data=xyhat_data)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### With coupling" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "exp_no = 0\n", "print(\"Read data, experiment %d\" % exp_no)\n", "xy_data = read_xy('data/XYPost.mat', exp_no)\n", "\n", "def gc(c1, c2):\n", " return 0.05*(c1 + c2)\n", "\n", "def gk(k1, k2):\n", " return 0.05*(k1 + k2)\n", "\n", "def f1(k1, k2):\n", " return 1.5e3*k1\n", "\n", "def f2(k1, k2):\n", " return 1.5e3*k2\n", "\n", "def ga(k1, k2):\n", " return 0.05*(a1 + a2)\n", "\n", "rows = [50, 65, 70, 75, 80, 85, 90, 100]\n", "zeta1 = 0.1\n", "zeta2 = 0.1\n", "xyhat_data = simulate_experiment(xy_data, rows, zeta1, zeta2, gc, gk, ga, f1, f2, verbose=True)\n", "\n", "plot_std_freqscan(xyhat_data)\n", "plot_xy(rows, xyhat_data)\n", "plot_xyt(rows, xy_data, sim_xy_data=xyhat_data)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "\n", "## Loss function\n", "\n", "Given the two multivariate signals, one empirical and one simulated, we need a distance metric $d(\\mathbf{S}(\\omega), \\hat{\\mathbf{S}}(\\omega))$ that quantifies the error of our simulation.\n", "\n", "Consider first a single frequency $\\mathbf{S}(\\omega=x) \\in \\mathbb{R}^2$. Treat each component individually, perform autocorrelation do find the shift, then simply use mean squared error as the distance metric between the two common periods of $\\mathbf{S}$ and $\\hat{\\mathbf{S}}$. We extend this to the multivariate case by simply averaging the loss for each frequency.\n", "\n", "TODO: Assert that both components of $\\mathbf{S}(\\omega=x)$ have the same shift." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def normalize_signal(s):\n", " return (s - np.mean(s)) / np.std(s)\n", "\n", "def autocorrelate_1d(s1, s2):\n", " corr = np.correlate(s1, s2, mode='same') / (np.linalg.norm(s1)*np.linalg.norm(s2))\n", " corr_half = corr[int(len(s1)/2):]\n", " idx = np.argmax(corr_half)\n", " return idx, corr_half[idx]\n", "\n", "def loss_1d(s1, s2, normalize=True):\n", " assert len(s1) == len(s2)\n", " N = len(s1)\n", " if normalize:\n", " _s1 = normalize_signal(s1)\n", " _s2 = normalize_signal(s2)\n", " else:\n", " _s1 = s1\n", " _s2 = s2\n", " idx, coeff = autocorrelate_1d(_s1, _s2)\n", " return idx, coeff, np.mean((_s1[idx:]-_s2[:N-idx])**2)\n", "\n", "def xy_loss(rows, xy_data, xyhat_data, normalize=True, verbose=False):\n", " # Calculate correlation coefficients and MSE for both x and y for the specified set of rows\n", " x_idxs, x_coeffs, x_mses = zip(*[loss_1d(xy_data['x'][i,:], xyhat_data['x'][i,:], normalize=normalize) for i in rows])\n", " y_idxs, y_coeffs, y_mses = zip(*[loss_1d(xy_data['y'][i,:], xyhat_data['y'][i,:], normalize=normalize) for i in rows])\n", " # Print some statistics\n", " if verbose:\n", " print('\\n'.join(map(\n", " lambda var: \"%s: %.4f mean, %.4f std\" % (var[0], np.mean(var[1]), np.std(var[1])),\n", " [\n", " ('X idx', x_idxs),\n", " ('Y idx', y_idxs),\n", " ('X coeffs', x_coeffs),\n", " ('Y coeffs', y_coeffs),\n", " ('X MSEs', x_mses),\n", " ('Y MSEs', y_mses)\n", " ]\n", " )))\n", " # Return the sum of means of both components\n", " return np.mean(x_mses) + np.mean(y_mses)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Loss function test\n", "\n", "Random signals and sines." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "# Test loss function\n", "t = np.linspace(0, 100, 100)\n", "s1 = {\n", " 'f': np.array([\n", " np.ones((100,)),\n", " 2*np.ones((100,))\n", " ]),\n", " 'x': np.array([\n", " np.random.randn(100),\n", " np.array([1*np.sin(i/np.pi) for i in t]),\n", " ]),\n", " 'y': np.array([\n", " np.random.randn(100),\n", " np.array([1*np.cos(i/np.pi) for i in t]),\n", " ])\n", "}\n", "s2 = {\n", " 'f': np.array([\n", " np.ones((100,)),\n", " 2*np.ones((100,))\n", " ]),\n", " 'x': np.array([\n", " np.random.randn(100),\n", " np.array([1*np.cos(i/np.pi) for i in t]),\n", " ]),\n", " 'y': np.array([\n", " np.random.randn(100),\n", " np.array([1*np.sin(i/np.pi) for i in t]),\n", " ])\n", "}\n", "\n", "plot_xyt([0,1], s1, sim_xy_data=s2)\n", "\n", "print(\"Loss: %.4f\\n\" % xy_loss([0], s1, s2, verbose=True))\n", "print(\"Loss: %.4f\\n\" % xy_loss([1], s1, s2, verbose=True))\n", "print(\"Loss: %.4f\" % xy_loss([0, 1], s1, s2, verbose=True))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Loss for model\n", "\n", "Plotting normalized signals." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "print(\"Loss: %.4f\\n\" % xy_loss(rows, xy_data, xyhat_data, verbose=True))\n", "plot_xyt(rows, xy_data, normalizer=normalize_signal, sim_xy_data=xyhat_data)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "scrolled": false }, "outputs": [], "source": [ "#C_grid = [1e1] # Amplitude of driving force (gamma)\n", "#c_grid = [1e2] # Damping (delta)\n", "#a_grid = [1e-1] # Non-linear restoring force (beta)\n", "#k_grid = [1e6] # Linear stiffness (alpha)\n", "\n", "#xy_data = read_xy('data/XYPost.mat', 0)\n", "#rows = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n", "#rows = [10, 65, 70, 75, 80, 85, 90]\n", "\n", "#losses = []\n", "#best_loss = 1e9\n", "#best_p = None\n", "#for c, a, k, C in tqdm.tqdm(itertools.product(*[c_grid, a_grid, k_grid, C_grid])):\n", "# print(\"c1=c2=c3=%.4f, a1=a2=a3=%.4f, k1=k2=k3=%.4f, C1=C2=%.4f\" % (c, a, k, C))\n", "# _damping = 0.001\n", "# _omegar = 8e3\n", "# _omega0 = _omegar/(np.sqrt(1-2*_damping**2))\n", "# C = 1e5\n", "# c2 = 2*_damping*_omega0\n", "# k2 = _omega0**2\n", "# a = 0.5\n", "# p = (C, C, 0.03*c2, c2, 0.01*c2, 0.01*a, 0.9*a, 0.02*a, 0.5*k2, k2, 0.1*k2)\n", "# #p = (C, C, c, c, c, a, a, a, k, k, k)\n", "# xyhat_data = simulate_xy_data(rows, xy_data, p)\n", "# loss = xy_loss(rows, xy_data, xyhat_data, verbose=True)\n", "# losses.append(loss)\n", "# if loss < best_loss:\n", "# best_loss = loss\n", "# best_p = p\n", "# print(\"Loss: %.4f\\n\" % loss)\n", "# plot_xyt(rows, xy_data, sim_xy_data=xyhat_data)\n", "# break" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "@curry\n", "def objective(omega, p):\n", " obj = 0.0\n", " # Simulate and return MSE(xy_data, sim_xy_data)\n", " return obj\n", "\n", "p0 = (0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1)\n", "bounds = [\n", " (0.1, 0.2),\n", " (0.1, 0.2),\n", " (0.1, 0.2),\n", " (0.1, 0.2),\n", " (0.1, 0.2),\n", " (0.1, 0.2),\n", " (0.1, 0.2),\n", " (0.1, 0.2),\n", " (0.1, 0.2)\n", "]\n", "#omegas = xy_data['f'][:,0].tolist()\n", "#solution = minimize(objective(omega), p0, method='SLSQP', bounds=bounds)\n", "#p = solution.x\n", "\n", "# Simulate with updated values\n", "#t, X, dt, pstep = model(T, t_trans, dt_per_period, x0, v0, omega, p)" ] } ], "metadata": { "anaconda-cloud": {}, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.5.2" } }, "nbformat": 4, "nbformat_minor": 1 }